10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
29 ~bug_message() { if (msg) snapshot_free(msg); }
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
49 failed_promise(false),
50 too_many_reads(false),
51 bad_synchronization(false),
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
61 ModelAction *current_action;
62 unsigned int next_thread_id;
63 modelclock_t used_sequence_numbers;
64 ModelAction *next_backtrack;
65 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
162 snapshot_backtrack_before(0);
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
168 return priv->next_thread_id++;
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
174 return priv->next_thread_id;
178 * Must be called from user-thread context (e.g., through the global
179 * thread_current() interface)
181 * @return The currently executing Thread.
183 Thread * ModelChecker::get_current_thread() const
185 return scheduler->get_current_thread();
188 /** @return a sequence number for a new ModelAction */
189 modelclock_t ModelChecker::get_next_seq_num()
191 return ++priv->used_sequence_numbers;
194 Node * ModelChecker::get_curr_node() const
196 return node_stack->get_head();
200 * @brief Choose the next thread to execute.
202 * This function chooses the next thread that should execute. It can force the
203 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
204 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
205 * The model-checker may have no preference regarding the next thread (i.e.,
206 * when exploring a new execution ordering), in which case this will return
208 * @param curr The current ModelAction. This action might guide the choice of
210 * @return The next thread to run. If the model-checker has no preference, NULL.
212 Thread * ModelChecker::get_next_thread(ModelAction *curr)
217 /* Do not split atomic actions. */
219 return thread_current();
220 else if (curr->get_type() == THREAD_CREATE)
221 return curr->get_thread_operand();
224 /* Have we completed exploring the preselected path? */
228 /* Else, we are trying to replay an execution */
229 ModelAction *next = node_stack->get_next()->get_action();
231 if (next == diverge) {
232 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
233 earliest_diverge = diverge;
235 Node *nextnode = next->get_node();
236 Node *prevnode = nextnode->get_parent();
237 scheduler->update_sleep_set(prevnode);
239 /* Reached divergence point */
240 if (nextnode->increment_misc()) {
241 /* The next node will try to satisfy a different misc_index value. */
242 tid = next->get_tid();
243 node_stack->pop_restofstack(2);
244 } else if (nextnode->increment_promise()) {
245 /* The next node will try to satisfy a different set of promises. */
246 tid = next->get_tid();
247 node_stack->pop_restofstack(2);
248 } else if (nextnode->increment_read_from()) {
249 /* The next node will read from a different value. */
250 tid = next->get_tid();
251 node_stack->pop_restofstack(2);
252 } else if (nextnode->increment_future_value()) {
253 /* The next node will try to read from a different future value. */
254 tid = next->get_tid();
255 node_stack->pop_restofstack(2);
256 } else if (nextnode->increment_relseq_break()) {
257 /* The next node will try to resolve a release sequence differently */
258 tid = next->get_tid();
259 node_stack->pop_restofstack(2);
262 /* Make a different thread execute for next step */
263 scheduler->add_sleep(get_thread(next->get_tid()));
264 tid = prevnode->get_next_backtrack();
265 /* Make sure the backtracked thread isn't sleeping. */
266 node_stack->pop_restofstack(1);
267 if (diverge == earliest_diverge) {
268 earliest_diverge = prevnode->get_action();
271 /* The correct sleep set is in the parent node. */
274 DEBUG("*** Divergence point ***\n");
278 tid = next->get_tid();
280 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
281 ASSERT(tid != THREAD_ID_T_NONE);
282 return thread_map->get(id_to_int(tid));
286 * We need to know what the next actions of all threads in the sleep
287 * set will be. This method computes them and stores each one in
288 * the corresponding thread object's pending action.
291 void ModelChecker::execute_sleep_set()
293 for (unsigned int i = 0; i < get_num_threads(); i++) {
294 thread_id_t tid = int_to_id(i);
295 Thread *thr = get_thread(tid);
296 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
297 thr->set_state(THREAD_RUNNING);
298 scheduler->next_thread(thr);
299 Thread::swap(&system_context, thr);
300 priv->current_action->set_sleep_flag();
301 thr->set_pending(priv->current_action);
306 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
308 for (unsigned int i = 0; i < get_num_threads(); i++) {
309 Thread *thr = get_thread(int_to_id(i));
310 if (scheduler->is_sleep_set(thr)) {
311 ModelAction *pending_act = thr->get_pending();
312 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
313 //Remove this thread from sleep set
314 scheduler->remove_sleep(thr);
319 /** @brief Alert the model-checker that an incorrectly-ordered
320 * synchronization was made */
321 void ModelChecker::set_bad_synchronization()
323 priv->bad_synchronization = true;
326 bool ModelChecker::has_asserted() const
328 return priv->asserted;
331 void ModelChecker::set_assert()
333 priv->asserted = true;
337 * Check if we are in a deadlock. Should only be called at the end of an
338 * execution, although it should not give false positives in the middle of an
339 * execution (there should be some ENABLED thread).
341 * @return True if program is in a deadlock; false otherwise
343 bool ModelChecker::is_deadlocked() const
345 bool blocking_threads = false;
346 for (unsigned int i = 0; i < get_num_threads(); i++) {
347 thread_id_t tid = int_to_id(i);
350 Thread *t = get_thread(tid);
351 if (!t->is_model_thread() && t->get_pending())
352 blocking_threads = true;
354 return blocking_threads;
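/*
 * Illustration (added sketch, not part of the original sources): a minimal
 * C++11 program in which every thread ends up blocked on a pending lock
 * acquisition, which is exactly the state is_deadlocked() reports. Written
 * against plain <thread>/<mutex> for clarity; the user-level API actually
 * used to drive this checker may differ.
 *
 *   #include <mutex>
 *   #include <thread>
 *
 *   std::mutex a, b;
 *
 *   static void t1() { a.lock(); b.lock(); b.unlock(); a.unlock(); }
 *   static void t2() { b.lock(); a.lock(); a.unlock(); b.unlock(); }
 *
 *   int main()
 *   {
 *       std::thread x(t1), y(t2); // some interleavings deadlock: t1 holds a
 *       x.join();                 // and waits for b, while t2 holds b and
 *       y.join();                 // waits for a
 *   }
 */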
358 * Check if this is a complete execution. That is, have all threads completed
359 * execution (rather than exiting because sleep sets have forced a redundant
362 * @return True if the execution is complete.
364 bool ModelChecker::is_complete_execution() const
366 for (unsigned int i = 0; i < get_num_threads(); i++)
367 if (is_enabled(int_to_id(i)))
373 * @brief Assert a bug in the executing program.
375 * Use this function to assert any sort of bug in the user program. If the
376 * current trace is feasible (actually, a prefix of some feasible execution),
377 * then this execution will be aborted, printing the appropriate message. If
378 * the current trace is not yet feasible, the error message will be stashed and
379 * printed if the execution ever becomes feasible.
381 * @param msg Descriptive message for the bug (do not include newline char)
382 * @return True if bug is immediately-feasible
384 bool ModelChecker::assert_bug(const char *msg)
386 priv->bugs.push_back(new bug_message(msg));
388 if (isfeasibleprefix()) {
396 * @brief Assert a bug in the executing program, asserted by a user thread
397 * @see ModelChecker::assert_bug
398 * @param msg Descriptive message for the bug (do not include newline char)
400 void ModelChecker::assert_user_bug(const char *msg)
402 /* If feasible bug, bail out now */
404 switch_to_master(NULL);
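/*
 * Illustration (added sketch): user code normally reaches assert_user_bug()
 * through an assertion macro rather than by calling it directly. Assuming the
 * distribution ships a MODEL_ASSERT-style macro (the exact macro and header
 * name are assumptions here), a user test might look like:
 *
 *   // #include "model-assert.h"   // assumed header name
 *   std::atomic<int> x(0);
 *   // T1: x.store(1, std::memory_order_relaxed);
 *   // T2: MODEL_ASSERT(x.load(std::memory_order_relaxed) == 1);
 *   //     // reported as a bug on any interleaving where T2's load
 *   //     // executes before T1's store
 */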
407 /** @return True, if any bugs have been reported for this execution */
408 bool ModelChecker::have_bug_reports() const
410 return priv->bugs.size() != 0;
413 /** @brief Print bug report listing for this execution (if any bugs exist) */
414 void ModelChecker::print_bugs() const
416 if (have_bug_reports()) {
417 model_print("Bug report: %zu bug%s detected\n",
419 priv->bugs.size() > 1 ? "s" : "");
420 for (unsigned int i = 0; i < priv->bugs.size(); i++)
421 priv->bugs[i]->print();
426 * @brief Record end-of-execution stats
428 * Must be run when exiting an execution. Records various stats.
429 * @see struct execution_stats
431 void ModelChecker::record_stats()
434 if (!isfeasibleprefix())
435 stats.num_infeasible++;
436 else if (have_bug_reports())
437 stats.num_buggy_executions++;
438 else if (is_complete_execution())
439 stats.num_complete++;
441 stats.num_redundant++;
444 /** @brief Print execution stats */
445 void ModelChecker::print_stats() const
447 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
448 model_print("Number of redundant executions: %d\n", stats.num_redundant);
449 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
450 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
451 model_print("Total executions: %d\n", stats.num_total);
452 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
456 * @brief End-of-execution print
457 * @param printbugs Should any existing bugs be printed?
459 void ModelChecker::print_execution(bool printbugs) const
461 print_program_output();
463 if (DBG_ENABLED() || params.verbose) {
464 model_print("Earliest divergence point since last feasible execution:\n");
465 if (earliest_diverge)
466 earliest_diverge->print();
468 model_print("(Not set)\n");
474 /* Don't print invalid bugs */
483 * Queries the model-checker for more executions to explore and, if one
484 * exists, resets the model-checker state to execute a new execution.
486 * @return If there are more executions to explore, return true. Otherwise,
489 bool ModelChecker::next_execution()
492 /* Is this execution a feasible execution that's worth bug-checking? */
493 bool complete = isfeasibleprefix() && (is_complete_execution() ||
496 /* End-of-execution bug checks */
499 assert_bug("Deadlock detected");
507 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
508 print_execution(complete);
510 clear_program_output();
513 earliest_diverge = NULL;
515 if ((diverge = get_next_backtrack()) == NULL)
519 model_print("Next execution will diverge at:\n");
523 reset_to_initial_state();
527 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
529 switch (act->get_type()) {
534 /* Optimization: relaxed operations don't need backtracking */
535 if (act->is_relaxed())
537 /* linear search: from most recent to oldest */
538 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
539 action_list_t::reverse_iterator rit;
540 for (rit = list->rbegin(); rit != list->rend(); rit++) {
541 ModelAction *prev = *rit;
542 if (prev->could_synchronize_with(act))
548 case ATOMIC_TRYLOCK: {
549 /* linear search: from most recent to oldest */
550 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
551 action_list_t::reverse_iterator rit;
552 for (rit = list->rbegin(); rit != list->rend(); rit++) {
553 ModelAction *prev = *rit;
554 if (act->is_conflicting_lock(prev))
559 case ATOMIC_UNLOCK: {
560 /* linear search: from most recent to oldest */
561 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
562 action_list_t::reverse_iterator rit;
563 for (rit = list->rbegin(); rit != list->rend(); rit++) {
564 ModelAction *prev = *rit;
565 if (!act->same_thread(prev) && prev->is_failed_trylock())
571 /* linear search: from most recent to oldest */
572 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
573 action_list_t::reverse_iterator rit;
574 for (rit = list->rbegin(); rit != list->rend(); rit++) {
575 ModelAction *prev = *rit;
576 if (!act->same_thread(prev) && prev->is_failed_trylock())
578 if (!act->same_thread(prev) && prev->is_notify())
584 case ATOMIC_NOTIFY_ALL:
585 case ATOMIC_NOTIFY_ONE: {
586 /* linear search: from most recent to oldest */
587 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
588 action_list_t::reverse_iterator rit;
589 for (rit = list->rbegin(); rit != list->rend(); rit++) {
590 ModelAction *prev = *rit;
591 if (!act->same_thread(prev) && prev->is_wait())
602 /** This method finds backtracking points at which we should try to
603 * reorder the parameter ModelAction against earlier conflicting actions.
605 * @param act The ModelAction to find backtracking points for.
607 void ModelChecker::set_backtracking(ModelAction *act)
609 Thread *t = get_thread(act);
610 ModelAction *prev = get_last_conflict(act);
614 Node *node = prev->get_node()->get_parent();
616 int low_tid, high_tid;
617 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
618 low_tid = id_to_int(act->get_tid());
619 high_tid = low_tid + 1;
622 high_tid = get_num_threads();
625 for (int i = low_tid; i < high_tid; i++) {
626 thread_id_t tid = int_to_id(i);
628 /* Make sure this thread can be enabled here. */
629 if (i >= node->get_num_threads())
632 /* Don't backtrack into a point where the thread is disabled or sleeping. */
633 if (node->enabled_status(tid) != THREAD_ENABLED)
636 /* Check if this has been explored already */
637 if (node->has_been_explored(tid))
640 /* See if fairness allows */
641 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
643 for (int t = 0; t < node->get_num_threads(); t++) {
644 thread_id_t tother = int_to_id(t);
645 if (node->is_enabled(tother) && node->has_priority(tother)) {
653 /* Cache the latest backtracking point */
654 set_latest_backtrack(prev);
656 /* If this is a new backtracking point, mark the tree */
657 if (!node->set_backtrack(tid))
659 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
660 id_to_int(prev->get_tid()),
661 id_to_int(t->get_id()));
670 * @brief Cache a backtracking point as the "most recent", if eligible
672 * Note that this does not prepare the NodeStack for this backtracking
673 * operation, it only caches the action on a per-execution basis
675 * @param act The operation at which we should explore a different next action
676 * (i.e., backtracking point)
677 * @return True, if this action is now the most recent backtracking point;
680 bool ModelChecker::set_latest_backtrack(ModelAction *act)
682 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
683 priv->next_backtrack = act;
690 * Returns last backtracking point. The model checker will explore a different
691 * path for this point in the next execution.
692 * @return The ModelAction at which the next execution should diverge.
694 ModelAction * ModelChecker::get_next_backtrack()
696 ModelAction *next = priv->next_backtrack;
697 priv->next_backtrack = NULL;
702 * Processes a read or rmw model action.
703 * @param curr is the read model action to process.
704 * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
705 * @return True if processing this read updates the mo_graph.
707 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
709 uint64_t value = VALUE_NONE;
710 bool updated = false;
712 const ModelAction *reads_from = curr->get_node()->get_read_from();
713 if (reads_from != NULL) {
714 mo_graph->startChanges();
716 value = reads_from->get_value();
717 bool r_status = false;
719 if (!second_part_of_rmw) {
720 check_recency(curr, reads_from);
721 r_status = r_modification_order(curr, reads_from);
724 if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
725 mo_graph->rollbackChanges();
726 priv->too_many_reads = false;
730 read_from(curr, reads_from);
731 mo_graph->commitChanges();
732 mo_check_promises(curr, true);
735 } else if (!second_part_of_rmw) {
736 /* Read from future value */
737 struct future_value fv = curr->get_node()->get_future_value();
738 Promise *promise = new Promise(curr, fv);
740 curr->set_read_from_promise(promise);
741 promises->push_back(promise);
742 mo_graph->startChanges();
743 updated = r_modification_order(curr, promise);
744 mo_graph->commitChanges();
746 get_thread(curr)->set_return_value(value);
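/*
 * Illustration (added sketch) of the "read from future value" branch above:
 * with relaxed atomics, a load may be satisfied by a store that has not yet
 * been executed in the current exploration, which the checker models with
 * promises/future values.
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   // T1:
 *   int r1 = y.load(std::memory_order_relaxed);
 *   x.store(1, std::memory_order_relaxed);
 *
 *   // T2:
 *   int r2 = x.load(std::memory_order_relaxed);
 *   y.store(1, std::memory_order_relaxed);
 *
 *   // The outcome r1 == 1 && r2 == 1 is allowed by the C++11 memory model,
 *   // but each load must then read from a store that appears later in the
 *   // trace being built; those are the loads that read from promises.
 */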
752 * Processes a lock, trylock, or unlock model action. @param curr is
753 * the mutex model action to process.
755 * The try lock operation checks whether the lock is taken. If not,
756 * it falls through to the normal lock operation case. If so, it returns
759 * The lock operation has already been checked that it is enabled, so
760 * it just grabs the lock and synchronizes with the previous unlock.
762 * The unlock operation has to re-enable all of the threads that are
763 * waiting on the lock.
765 * @return True if synchronization was updated; false otherwise
767 bool ModelChecker::process_mutex(ModelAction *curr)
769 std::mutex *mutex = NULL;
770 struct std::mutex_state *state = NULL;
772 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
773 mutex = (std::mutex *)curr->get_location();
774 state = mutex->get_state();
775 } else if (curr->is_wait()) {
776 mutex = (std::mutex *)curr->get_value();
777 state = mutex->get_state();
780 switch (curr->get_type()) {
781 case ATOMIC_TRYLOCK: {
782 bool success = !state->islocked;
783 curr->set_try_lock(success);
785 get_thread(curr)->set_return_value(0);
788 get_thread(curr)->set_return_value(1);
790 //otherwise fall into the lock case
792 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
793 assert_bug("Lock access before initialization");
794 state->islocked = true;
795 ModelAction *unlock = get_last_unlock(curr);
796 //synchronize with the previous unlock statement
797 if (unlock != NULL) {
798 curr->synchronize_with(unlock);
803 case ATOMIC_UNLOCK: {
805 state->islocked = false;
806 //wake up the other threads
807 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
808 //activate all the waiting threads
809 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
810 scheduler->wake(get_thread(*rit));
817 state->islocked = false;
818 //wake up the other threads
819 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
820 //activate all the waiting threads
821 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
822 scheduler->wake(get_thread(*rit));
825 //check whether we should go to sleep or not...simulate spurious failures
826 if (curr->get_node()->get_misc() == 0) {
827 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
829 scheduler->sleep(get_thread(curr));
833 case ATOMIC_NOTIFY_ALL: {
834 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
835 //activate all the waiting threads
836 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
837 scheduler->wake(get_thread(*rit));
842 case ATOMIC_NOTIFY_ONE: {
843 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
844 int wakeupthread = curr->get_node()->get_misc();
845 action_list_t::iterator it = waiters->begin();
846 advance(it, wakeupthread);
847 scheduler->wake(get_thread(*it));
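/*
 * Illustration (added sketch) of the wait/notify handling above, written
 * against the standard <condition_variable> API for clarity (the checker's
 * bundled headers may expose a slightly different interface):
 *
 *   std::mutex m;
 *   std::condition_variable cv;
 *   bool ready = false;
 *
 *   // Consumer:
 *   std::unique_lock<std::mutex> lk(m);
 *   while (!ready)
 *       cv.wait(lk);   // may return without a matching notify; hence the loop
 *
 *   // Producer:
 *   { std::lock_guard<std::mutex> g(m); ready = true; }
 *   cv.notify_one();
 *
 *   // The misc counter consulted above lets the checker explore exactly that
 *   // spurious-wakeup case in addition to the normal sleep/wake path.
 */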
858 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
860 /* Do more ambitious checks now that mo is more complete */
861 if (mo_may_allow(writer, reader)) {
862 Node *node = reader->get_node();
864 /* Find an ancestor thread which exists at the time of the reader */
865 Thread *write_thread = get_thread(writer);
866 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
867 write_thread = write_thread->get_parent();
869 struct future_value fv = {
871 writer->get_seq_number() + params.maxfuturedelay,
872 write_thread->get_id(),
874 if (node->add_future_value(fv))
875 set_latest_backtrack(reader);
880 * Process a write ModelAction
881 * @param curr The ModelAction to process
882 * @return True if the mo_graph was updated or promises were resolved
884 bool ModelChecker::process_write(ModelAction *curr)
886 bool updated_mod_order = w_modification_order(curr);
887 bool updated_promises = resolve_promises(curr);
889 if (promises->size() == 0) {
890 for (unsigned int i = 0; i < futurevalues->size(); i++) {
891 struct PendingFutureValue pfv = (*futurevalues)[i];
892 add_future_value(pfv.writer, pfv.act);
894 futurevalues->clear();
897 mo_graph->commitChanges();
898 mo_check_promises(curr, false);
900 get_thread(curr)->set_return_value(VALUE_NONE);
901 return updated_mod_order || updated_promises;
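/*
 * The next function handles fence-acquire synchronization. As an added,
 * hedged illustration of the pattern it searches for (an acquire fence
 * synchronizing with a release fence through a relaxed read):
 *
 *   int data = 0;
 *   std::atomic<int> flag(0);
 *
 *   // T1:
 *   data = 42;
 *   std::atomic_thread_fence(std::memory_order_release);
 *   flag.store(1, std::memory_order_relaxed);
 *
 *   // T2:
 *   if (flag.load(std::memory_order_relaxed) == 1) {
 *       std::atomic_thread_fence(std::memory_order_acquire);
 *       int r = data;   // must observe 42: the acquire fence synchronizes
 *   }                   // with the release fence via the relaxed read
 */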
905 * Process a fence ModelAction
906 * @param curr The ModelAction to process
907 * @return True if synchronization was updated
909 bool ModelChecker::process_fence(ModelAction *curr)
912 * fence-relaxed: no-op
913 * fence-release: only log the occurrence (not in this function), for
914 * use in later synchronization
915 * fence-acquire (this function): search for hypothetical release
918 bool updated = false;
919 if (curr->is_acquire()) {
920 action_list_t *list = action_trace;
921 action_list_t::reverse_iterator rit;
922 /* Find X : is_read(X) && X --sb-> curr */
923 for (rit = list->rbegin(); rit != list->rend(); rit++) {
924 ModelAction *act = *rit;
927 if (act->get_tid() != curr->get_tid())
929 /* Stop at the beginning of the thread */
930 if (act->is_thread_start())
932 /* Stop once we reach a prior fence-acquire */
933 if (act->is_fence() && act->is_acquire())
937 /* read-acquire will find its own release sequences */
938 if (act->is_acquire())
941 /* Establish hypothetical release sequences */
942 rel_heads_list_t release_heads;
943 get_release_seq_heads(curr, act, &release_heads);
944 for (unsigned int i = 0; i < release_heads.size(); i++)
945 if (!curr->synchronize_with(release_heads[i]))
946 set_bad_synchronization();
947 if (release_heads.size() != 0)
955 * @brief Process the current action for thread-related activity
957 * Performs current-action processing for a THREAD_* ModelAction. Processing
958 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
959 * synchronization, etc. This function is a no-op for non-THREAD actions
960 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
962 * @param curr The current action
963 * @return True if synchronization was updated or a thread completed
965 bool ModelChecker::process_thread_action(ModelAction *curr)
967 bool updated = false;
969 switch (curr->get_type()) {
970 case THREAD_CREATE: {
971 Thread *th = curr->get_thread_operand();
972 th->set_creation(curr);
973 /* Promises can be satisfied by children */
974 for (unsigned int i = 0; i < promises->size(); i++) {
975 Promise *promise = (*promises)[i];
976 if (promise->thread_is_available(curr->get_tid()))
977 promise->add_thread(th->get_id());
982 Thread *blocking = curr->get_thread_operand();
983 ModelAction *act = get_last_action(blocking->get_id());
984 curr->synchronize_with(act);
985 updated = true; /* trigger rel-seq checks */
988 case THREAD_FINISH: {
989 Thread *th = get_thread(curr);
990 while (!th->wait_list_empty()) {
991 ModelAction *act = th->pop_wait_list();
992 scheduler->wake(get_thread(act));
995 /* Completed thread can't satisfy promises */
996 for (unsigned int i = 0; i < promises->size(); i++) {
997 Promise *promise = (*promises)[i];
998 if (promise->thread_is_available(th->get_id()))
999 if (promise->eliminate_thread(th->get_id()))
1000 priv->failed_promise = true;
1002 updated = true; /* trigger rel-seq checks */
1005 case THREAD_START: {
1006 check_promises(curr->get_tid(), NULL, curr->get_cv());
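/*
 * Illustration (added sketch) of the THREAD_JOIN synchronization above:
 * joining a thread orders everything that thread did before anything the
 * joiner does afterwards, so plain (non-atomic) data handed off this way is
 * race-free. Sketched with std::thread; the exact user-level thread API used
 * with this checker is an assumption here.
 *
 *   #include <cassert>
 *   #include <thread>
 *
 *   int shared = 0;   // plain, non-atomic
 *
 *   std::thread child([] { shared = 42; });
 *   child.join();            // THREAD_JOIN synchronizes with the child's finish
 *   assert(shared == 42);    // so this read is ordered after the child's write
 */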
1017 * @brief Process the current action for release sequence fixup activity
1019 * Performs model-checker release sequence fixups for the current action,
1020 * forcing a single pending release sequence to break (with a given, potential
1021 * "loose" write) or to complete (i.e., synchronize). If a pending release
1022 * sequence forms a complete release sequence, then we must perform the fixup
1023 * synchronization, mo_graph additions, etc.
1025 * @param curr The current action; must be a release sequence fixup action
1026 * @param work_queue The work queue to which to add work items as they are
1029 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1031 const ModelAction *write = curr->get_node()->get_relseq_break();
1032 struct release_seq *sequence = pending_rel_seqs->back();
1033 pending_rel_seqs->pop_back();
1035 ModelAction *acquire = sequence->acquire;
1036 const ModelAction *rf = sequence->rf;
1037 const ModelAction *release = sequence->release;
1041 ASSERT(release->same_thread(rf));
1043 if (write == NULL) {
1045 * @todo Forcing a synchronization requires that we set
1046 * modification order constraints. For instance, we can't allow
1047 * a fixup sequence in which two separate read-acquire
1048 * operations read from the same sequence, where the first one
1049 * synchronizes and the other doesn't. Essentially, we can't
1050 * allow any writes to insert themselves between 'release' and
1054 /* Must synchronize */
1055 if (!acquire->synchronize_with(release)) {
1056 set_bad_synchronization();
1059 /* Re-check all pending release sequences */
1060 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1061 /* Re-check act for mo_graph edges */
1062 work_queue->push_back(MOEdgeWorkEntry(acquire));
1064 /* propagate synchronization to later actions */
1065 action_list_t::reverse_iterator rit = action_trace->rbegin();
1066 for (; (*rit) != acquire; rit++) {
1067 ModelAction *propagate = *rit;
1068 if (acquire->happens_before(propagate)) {
1069 propagate->synchronize_with(acquire);
1070 /* Re-check 'propagate' for mo_graph edges */
1071 work_queue->push_back(MOEdgeWorkEntry(propagate));
1075 /* Break release sequence with new edges:
1076 * release --mo--> write --mo--> rf */
1077 mo_graph->addEdge(release, write);
1078 mo_graph->addEdge(write, rf);
1081 /* See if we have realized a data race */
1086 * Initialize the current action by performing one or more of the following
1087 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1088 * in the NodeStack, manipulating backtracking sets, allocating and
1089 * initializing clock vectors, and computing the promises to fulfill.
1091 * @param curr The current action, as passed from the user context; may be
1092 * freed/invalidated after the execution of this function, with a different
1093 * action "returned" its place (pass-by-reference)
1094 * @return True if curr is a newly-explored action; false otherwise
1096 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1098 ModelAction *newcurr;
1100 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1101 newcurr = process_rmw(*curr);
1104 if (newcurr->is_rmw())
1105 compute_promises(newcurr);
1111 (*curr)->set_seq_number(get_next_seq_num());
1113 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1115 /* First restore type and order in case of RMW operation */
1116 if ((*curr)->is_rmwr())
1117 newcurr->copy_typeandorder(*curr);
1119 ASSERT((*curr)->get_location() == newcurr->get_location());
1120 newcurr->copy_from_new(*curr);
1122 /* Discard duplicate ModelAction; use action from NodeStack */
1125 /* Always compute new clock vector */
1126 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1129 return false; /* Action was explored previously */
1133 /* Always compute new clock vector */
1134 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1136 /* Assign most recent release fence */
1137 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1140 * Perform one-time actions when pushing new ModelAction onto
1143 if (newcurr->is_write())
1144 compute_promises(newcurr);
1145 else if (newcurr->is_relseq_fixup())
1146 compute_relseq_breakwrites(newcurr);
1147 else if (newcurr->is_wait())
1148 newcurr->get_node()->set_misc_max(2);
1149 else if (newcurr->is_notify_one()) {
1150 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1152 return true; /* This was a new ModelAction */
1157 * @brief Establish reads-from relation between two actions
1159 * Perform basic operations involved with establishing a concrete rf relation,
1160 * including setting the ModelAction data and checking for release sequences.
1162 * @param act The action that is reading (must be a read)
1163 * @param rf The action from which we are reading (must be a write)
1165 * @return True if this read established synchronization
1167 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1169 act->set_read_from(rf);
1170 if (rf != NULL && act->is_acquire()) {
1171 rel_heads_list_t release_heads;
1172 get_release_seq_heads(act, act, &release_heads);
1173 int num_heads = release_heads.size();
1174 for (unsigned int i = 0; i < release_heads.size(); i++)
1175 if (!act->synchronize_with(release_heads[i])) {
1176 set_bad_synchronization();
1179 return num_heads > 0;
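/*
 * Illustration (added sketch) of the release/acquire synchronization that
 * read_from() establishes via release sequence heads:
 *
 *   int data = 0;
 *   std::atomic<int> flag(0);
 *
 *   // T1:
 *   data = 42;
 *   flag.store(1, std::memory_order_release);
 *
 *   // T2:
 *   if (flag.load(std::memory_order_acquire) == 1) {
 *       int r = data;   // must observe 42: the acquire load synchronizes
 *   }                   // with the release store it reads from
 */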
1185 * @brief Check whether a model action is enabled.
1187 * Checks whether a lock or join operation would be successful (i.e., is the
1188 * lock already locked, or is the joined thread already complete). If not, put
1189 * the action in a waiter list.
1191 * @param curr is the ModelAction to check whether it is enabled.
1192 * @return a bool that indicates whether the action is enabled.
1194 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1195 if (curr->is_lock()) {
1196 std::mutex *lock = (std::mutex *)curr->get_location();
1197 struct std::mutex_state *state = lock->get_state();
1198 if (state->islocked) {
1199 //Stick the action in the appropriate waiting queue
1200 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1203 } else if (curr->get_type() == THREAD_JOIN) {
1204 Thread *blocking = (Thread *)curr->get_location();
1205 if (!blocking->is_complete()) {
1206 blocking->push_wait_list(curr);
1215 * Stores the ModelAction for the current thread action. Call this
1216 * immediately before switching from user- to system-context to pass
1217 * data between them.
1218 * @param act The ModelAction created by the user-thread action
1220 void ModelChecker::set_current_action(ModelAction *act) {
1221 priv->current_action = act;
1225 * This is the heart of the model checker routine. It performs model-checking
1226 * actions corresponding to a given "current action." Among other processes, it
1227 * calculates reads-from relationships, updates synchronization clock vectors,
1228 * forms a memory_order constraints graph, and handles replay/backtrack
1229 * execution when running permutations of previously-observed executions.
1231 * @param curr The current action to process
1232 * @return The ModelAction that is actually executed; may be different than
1233 * curr; may be NULL, if the current action is not enabled to run
1235 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1238 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1240 if (!check_action_enabled(curr)) {
1241 /* Make the execution look like we chose to run this action
1242 * much later, when a lock/join can succeed */
1243 get_thread(curr)->set_pending(curr);
1244 scheduler->sleep(get_thread(curr));
1248 bool newly_explored = initialize_curr_action(&curr);
1254 wake_up_sleeping_actions(curr);
1256 /* Add the action to lists before any other model-checking tasks */
1257 if (!second_part_of_rmw)
1258 add_action_to_lists(curr);
1260 /* Build may_read_from set for newly-created actions */
1261 if (newly_explored && curr->is_read())
1262 build_reads_from_past(curr);
1264 /* Initialize work_queue with the "current action" work */
1265 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1266 while (!work_queue.empty() && !has_asserted()) {
1267 WorkQueueEntry work = work_queue.front();
1268 work_queue.pop_front();
1270 switch (work.type) {
1271 case WORK_CHECK_CURR_ACTION: {
1272 ModelAction *act = work.action;
1273 bool update = false; /* update this location's release seq's */
1274 bool update_all = false; /* update all release seq's */
1276 if (process_thread_action(curr))
1279 if (act->is_read() && process_read(act, second_part_of_rmw))
1282 if (act->is_write() && process_write(act))
1285 if (act->is_fence() && process_fence(act))
1288 if (act->is_mutex_op() && process_mutex(act))
1291 if (act->is_relseq_fixup())
1292 process_relseq_fixup(curr, &work_queue);
1295 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1297 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1300 case WORK_CHECK_RELEASE_SEQ:
1301 resolve_release_sequences(work.location, &work_queue);
1303 case WORK_CHECK_MO_EDGES: {
1304 /** @todo Complete verification of work_queue */
1305 ModelAction *act = work.action;
1306 bool updated = false;
1308 if (act->is_read()) {
1309 const ModelAction *rf = act->get_reads_from();
1310 const Promise *promise = act->get_reads_from_promise();
1312 if (r_modification_order(act, rf))
1314 } else if (promise) {
1315 if (r_modification_order(act, promise))
1319 if (act->is_write()) {
1320 if (w_modification_order(act))
1323 mo_graph->commitChanges();
1326 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1335 check_curr_backtracking(curr);
1336 set_backtracking(curr);
1340 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1342 Node *currnode = curr->get_node();
1343 Node *parnode = currnode->get_parent();
1345 if ((parnode && !parnode->backtrack_empty()) ||
1346 !currnode->misc_empty() ||
1347 !currnode->read_from_empty() ||
1348 !currnode->future_value_empty() ||
1349 !currnode->promise_empty() ||
1350 !currnode->relseq_break_empty()) {
1351 set_latest_backtrack(curr);
1355 bool ModelChecker::promises_expired() const
1357 for (unsigned int i = 0; i < promises->size(); i++) {
1358 Promise *promise = (*promises)[i];
1359 if (promise->get_expiration() < priv->used_sequence_numbers)
1366 * This is the strongest feasibility check available.
1367 * @return whether the current trace (partial or complete) must be a prefix of
1370 bool ModelChecker::isfeasibleprefix() const
1372 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1376 * Print diagnostic information about an infeasible execution
1377 * @param prefix A string to prefix the output with; if NULL, then a default
1378 * message prefix will be provided
1380 void ModelChecker::print_infeasibility(const char *prefix) const
1384 if (mo_graph->checkForCycles())
1385 ptr += sprintf(ptr, "[mo cycle]");
1386 if (priv->failed_promise)
1387 ptr += sprintf(ptr, "[failed promise]");
1388 if (priv->too_many_reads)
1389 ptr += sprintf(ptr, "[too many reads]");
1390 if (priv->bad_synchronization)
1391 ptr += sprintf(ptr, "[bad sw ordering]");
1392 if (promises_expired())
1393 ptr += sprintf(ptr, "[promise expired]");
1394 if (promises->size() != 0)
1395 ptr += sprintf(ptr, "[unresolved promise]");
1397 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1401 * Returns whether the current completed trace is feasible, except for pending
1402 * release sequences.
1404 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1406 return !is_infeasible() && promises->size() == 0;
1410 * Check if the current partial trace is infeasible. Does not check any
1411 * end-of-execution flags, which might rule out the execution. Thus, this is
1412 * useful only for ruling an execution as infeasible.
1413 * @return whether the current partial trace is infeasible.
1415 bool ModelChecker::is_infeasible() const
1417 return mo_graph->checkForCycles() ||
1418 priv->failed_promise ||
1419 priv->too_many_reads ||
1420 priv->bad_synchronization ||
1424 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1425 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1426 ModelAction *lastread = get_last_action(act->get_tid());
1427 lastread->process_rmw(act);
1428 if (act->is_rmw()) {
1429 if (lastread->get_reads_from())
1430 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1432 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1433 mo_graph->commitChanges();
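/*
 * Illustration (added sketch) of the RMW handling above: the checker splits a
 * read-modify-write into a read part (RMWR) and a write part (RMW/RMWC) and
 * keeps the two adjacent, so RMWs stay atomic.
 *
 *   std::atomic<int> c(0);
 *
 *   // T1 and T2 each execute:
 *   c.fetch_add(1, std::memory_order_relaxed);
 *
 *   // Each fetch_add writes exactly the value it read plus one, and no other
 *   // write may intervene, so every explored execution ends with c == 2;
 *   // the "lost update" result c == 1 is never produced.
 */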
1439 * Checks whether a thread has read from the same write for too many times
1440 * without seeing the effects of a later write.
1443 * 1) there must be a different write that we could read from that would satisfy the modification order,
1444 * 2) we must have read from the same value in excess of maxreads times, and
1445 * 3) that other write must have been in the reads_from set for maxreads times.
1447 * If so, we decide that the execution is no longer feasible.
1449 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1451 if (params.maxreads != 0) {
1452 if (curr->get_node()->get_read_from_size() <= 1)
1454 //Must make sure that execution is currently feasible... We could
1455 //accidentally clear by rolling back
1456 if (is_infeasible())
1458 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1459 int tid = id_to_int(curr->get_tid());
1462 if ((int)thrd_lists->size() <= tid)
1464 action_list_t *list = &(*thrd_lists)[tid];
1466 action_list_t::reverse_iterator rit = list->rbegin();
1467 /* Skip past curr */
1468 for (; (*rit) != curr; rit++)
1470 /* go past curr now */
1473 action_list_t::reverse_iterator ritcopy = rit;
1474 //See if we have enough reads from the same value
1476 for (; count < params.maxreads; rit++, count++) {
1477 if (rit == list->rend())
1479 ModelAction *act = *rit;
1480 if (!act->is_read())
1483 if (act->get_reads_from() != rf)
1485 if (act->get_node()->get_read_from_size() <= 1)
1488 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1490 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1492 /* Need a different write */
1496 /* Test to see whether this is a feasible write to read from */
1497 mo_graph->startChanges();
1498 r_modification_order(curr, write);
1499 bool feasiblereadfrom = !is_infeasible();
1500 mo_graph->rollbackChanges();
1502 if (!feasiblereadfrom)
1506 bool feasiblewrite = true;
1507 //now we need to see if this write works for everyone
1509 for (int loop = count; loop > 0; loop--, rit++) {
1510 ModelAction *act = *rit;
1511 bool foundvalue = false;
1512 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1513 if (act->get_node()->get_read_from_at(j) == write) {
1519 feasiblewrite = false;
1523 if (feasiblewrite) {
1524 priv->too_many_reads = true;
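/*
 * Illustration (added sketch) of the situation the maxreads bound above
 * targets: a reader spinning on a flag can otherwise read from the same old
 * write an unbounded number of times.
 *
 *   std::atomic<int> flag(0);
 *
 *   // T1:
 *   while (!flag.load(std::memory_order_acquire))
 *       ;
 *
 *   // T2:
 *   flag.store(1, std::memory_order_release);
 *
 *   // Without a bound, the checker could keep exploring executions in which
 *   // every loop iteration reads the initial 0; after maxreads such reads
 *   // (with T2's store available to read from), the execution is declared
 *   // no longer feasible.
 */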
1532 * Updates the mo_graph with the constraints imposed from the current
1535 * Basic idea is the following: Go through each other thread and find
1536 * the last action that happened before our read. Two cases:
1538 * (1) The action is a write => that write must either occur before
1539 * the write we read from or be the write we read from.
1541 * (2) The action is a read => the write that that action read from
1542 * must occur before the write we read from or be the same write.
1544 * @param curr The current action. Must be a read.
1545 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1546 * @return True if modification order edges were added; false otherwise
1548 template <typename rf_type>
1549 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1551 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1554 ASSERT(curr->is_read());
1556 /* Last SC fence in the current thread */
1557 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1559 /* Iterate over all threads */
1560 for (i = 0; i < thrd_lists->size(); i++) {
1561 /* Last SC fence in thread i */
1562 ModelAction *last_sc_fence_thread_local = NULL;
1563 if (int_to_id((int)i) != curr->get_tid())
1564 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1566 /* Last SC fence in thread i, before last SC fence in current thread */
1567 ModelAction *last_sc_fence_thread_before = NULL;
1568 if (last_sc_fence_local)
1569 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1571 /* Iterate over actions in thread, starting from most recent */
1572 action_list_t *list = &(*thrd_lists)[i];
1573 action_list_t::reverse_iterator rit;
1574 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1575 ModelAction *act = *rit;
1577 if (act->is_write() && !act->equals(rf) && act != curr) {
1578 /* C++, Section 29.3 statement 5 */
1579 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1580 *act < *last_sc_fence_thread_local) {
1581 added = mo_graph->addEdge(act, rf) || added;
1584 /* C++, Section 29.3 statement 4 */
1585 else if (act->is_seqcst() && last_sc_fence_local &&
1586 *act < *last_sc_fence_local) {
1587 added = mo_graph->addEdge(act, rf) || added;
1590 /* C++, Section 29.3 statement 6 */
1591 else if (last_sc_fence_thread_before &&
1592 *act < *last_sc_fence_thread_before) {
1593 added = mo_graph->addEdge(act, rf) || added;
1599 * Include at most one act per-thread that "happens
1600 * before" curr. Don't consider reflexively.
1602 if (act->happens_before(curr) && act != curr) {
1603 if (act->is_write()) {
1604 if (!act->equals(rf)) {
1605 added = mo_graph->addEdge(act, rf) || added;
1608 const ModelAction *prevreadfrom = act->get_reads_from();
1609 //if the previous read is unresolved, keep going...
1610 if (prevreadfrom == NULL)
1613 if (!prevreadfrom->equals(rf)) {
1614 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1623 * All compatible, thread-exclusive promises must be ordered after any
1624 * concrete loads from the same thread
1626 for (unsigned int i = 0; i < promises->size(); i++)
1627 if ((*promises)[i]->is_compatible_exclusive(curr))
1628 added = mo_graph->addEdge(rf, (*promises)[i]) || added;
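/*
 * Illustration (added sketch) of the basic constraint r_modification_order()
 * records: if a write A happens before a read R, and R reads from a different
 * write B, then A must precede B in the modification order.
 *
 *   std::atomic<int> x(0);
 *
 *   // T1:
 *   x.store(1, std::memory_order_relaxed);       // A
 *   int r = x.load(std::memory_order_relaxed);   // R
 *
 *   // T2:
 *   x.store(2, std::memory_order_relaxed);       // B
 *
 *   // If R returns 2, then A --mo--> B is forced (write-read coherence):
 *   // T1 has seen its own store overwritten, so B cannot be ordered before A.
 */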
1634 * Updates the mo_graph with the constraints imposed from the current write.
1636 * Basic idea is the following: Go through each other thread and find
1637 * the latest action that happened before our write. Two cases:
1639 * (1) The action is a write => that write must occur before
1642 * (2) The action is a read => the write that that action read from
1643 * must occur before the current write.
1645 * This method also handles two other issues:
1647 * (I) Sequential Consistency: Making sure that if the current write is
1648 * seq_cst, that it occurs after the previous seq_cst write.
1650 * (II) Sending the write back to non-synchronizing reads.
1652 * @param curr The current action. Must be a write.
1653 * @return True if modification order edges were added; false otherwise
1655 bool ModelChecker::w_modification_order(ModelAction *curr)
1657 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1660 ASSERT(curr->is_write());
1662 if (curr->is_seqcst()) {
1663 /* We have to at least see the last sequentially consistent write,
1664 so we are initialized. */
1665 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1666 if (last_seq_cst != NULL) {
1667 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1671 /* Last SC fence in the current thread */
1672 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1674 /* Iterate over all threads */
1675 for (i = 0; i < thrd_lists->size(); i++) {
1676 /* Last SC fence in thread i, before last SC fence in current thread */
1677 ModelAction *last_sc_fence_thread_before = NULL;
1678 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1679 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1681 /* Iterate over actions in thread, starting from most recent */
1682 action_list_t *list = &(*thrd_lists)[i];
1683 action_list_t::reverse_iterator rit;
1684 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1685 ModelAction *act = *rit;
1688 * 1) If RMW and it actually read from something, then we
1689 * already have all relevant edges, so just skip to next
1692 * 2) If RMW and it didn't read from anything, we should add
1693 * whatever edge we can get to speed up convergence.
1695 * 3) If normal write, we need to look at earlier actions, so
1696 * continue processing list.
1698 if (curr->is_rmw()) {
1699 if (curr->get_reads_from() != NULL)
1707 /* C++, Section 29.3 statement 7 */
1708 if (last_sc_fence_thread_before && act->is_write() &&
1709 *act < *last_sc_fence_thread_before) {
1710 added = mo_graph->addEdge(act, curr) || added;
1715 * Include at most one act per-thread that "happens
1718 if (act->happens_before(curr)) {
1720 * Note: if act is RMW, just add edge:
1722 * The following edge should be handled elsewhere:
1723 * readfrom(act) --mo--> act
1725 if (act->is_write())
1726 added = mo_graph->addEdge(act, curr) || added;
1727 else if (act->is_read()) {
1728 //if previous read accessed a null, just keep going
1729 if (act->get_reads_from() == NULL)
1731 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1734 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1735 !act->same_thread(curr)) {
1736 /* We have an action that:
1737 (1) did not happen before us
1738 (2) is a read and we are a write
1739 (3) cannot synchronize with us
1740 (4) is in a different thread
1742 that read could potentially read from our write. Note that
1743 these checks are overly conservative at this point, we'll
1744 do more checks before actually removing the
1748 if (thin_air_constraint_may_allow(curr, act)) {
1749 if (!is_infeasible())
1750 futurevalues->push_back(PendingFutureValue(curr, act));
1751 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1752 add_future_value(curr, act);
1759 * All compatible, thread-exclusive promises must be ordered after any
1760 * concrete stores to the same thread, or else they can be merged with
1763 for (unsigned int i = 0; i < promises->size(); i++)
1764 if ((*promises)[i]->is_compatible_exclusive(curr))
1765 added = mo_graph->addEdge(curr, (*promises)[i]) || added;
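/*
 * Illustration (added sketch) of why w_modification_order() gives seq_cst
 * writes special treatment (issue (I) above): the total order over seq_cst
 * operations rules out outcomes that relaxed accesses permit.
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   // T1:
 *   x.store(1, std::memory_order_seq_cst);
 *   int r1 = y.load(std::memory_order_seq_cst);
 *
 *   // T2:
 *   y.store(1, std::memory_order_seq_cst);
 *   int r2 = x.load(std::memory_order_seq_cst);
 *
 *   // With seq_cst, the outcome r1 == 0 && r2 == 0 is forbidden; if the four
 *   // operations are relaxed instead, the checker will find that outcome.
 */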
1770 /** Arbitrary reads from the future are not allowed. Section 29.3
1771 * part 9 places some constraints. This method checks one result of the
1772 * constraint. Others require compiler support. */
1773 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1775 if (!writer->is_rmw())
1778 if (!reader->is_rmw())
1781 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1782 if (search == reader)
1784 if (search->get_tid() == reader->get_tid() &&
1785 search->happens_before(reader))
1793 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1794 * some constraints. This method checks the following constraint (others
1795 * require compiler support):
1797 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1799 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1801 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1803 /* Iterate over all threads */
1804 for (i = 0; i < thrd_lists->size(); i++) {
1805 const ModelAction *write_after_read = NULL;
1807 /* Iterate over actions in thread, starting from most recent */
1808 action_list_t *list = &(*thrd_lists)[i];
1809 action_list_t::reverse_iterator rit;
1810 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1811 ModelAction *act = *rit;
1813 /* Don't disallow due to act == reader */
1814 if (!reader->happens_before(act) || reader == act)
1816 else if (act->is_write())
1817 write_after_read = act;
1818 else if (act->is_read() && act->get_reads_from() != NULL)
1819 write_after_read = act->get_reads_from();
1822 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1829 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1830 * The ModelAction under consideration is expected to be taking part in
1831 * release/acquire synchronization as an object of the "reads from" relation.
1832 * Note that this can only provide release sequence support for RMW chains
1833 * which do not read from the future, as those actions cannot be traced until
1834 * their "promise" is fulfilled. Similarly, we may not even establish the
1835 * presence of a release sequence with certainty, as some modification order
1836 * constraints may be decided further in the future. Thus, this function
1837 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1838 * and a boolean representing certainty.
1840 * @param rf The action that might be part of a release sequence. Must be a
1842 * @param release_heads A pass-by-reference style return parameter. After
1843 * execution of this function, release_heads will contain the heads of all the
1844 * relevant release sequences, if any exists with certainty
1845 * @param pending A pass-by-reference style return parameter which is only used
1846 * when returning false (i.e., uncertain). Returns most information regarding
1847 * an uncertain release sequence, including any write operations that might
1848 * break the sequence.
1849 * @return true, if the ModelChecker is certain that release_heads is complete;
1852 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1853 rel_heads_list_t *release_heads,
1854 struct release_seq *pending) const
1856 /* Only check for release sequences if there are no cycles */
1857 if (mo_graph->checkForCycles())
1861 ASSERT(rf->is_write());
1863 if (rf->is_release())
1864 release_heads->push_back(rf);
1865 else if (rf->get_last_fence_release())
1866 release_heads->push_back(rf->get_last_fence_release());
1868 break; /* End of RMW chain */
1870 /** @todo Need to be smarter here... In the linux lock
1871 * example, this will run to the beginning of the program for
1873 /** @todo The way to be smarter here is to keep going until 1
1874 * thread has a release preceded by an acquire and you've seen
1877 /* acq_rel RMW is a sufficient stopping condition */
1878 if (rf->is_acquire() && rf->is_release())
1879 return true; /* complete */
1881 rf = rf->get_reads_from();
1884 /* read from future: need to settle this later */
1886 return false; /* incomplete */
1889 if (rf->is_release())
1890 return true; /* complete */
1892 /* else relaxed write
1893 * - check for fence-release in the same thread (29.8, stmt. 3)
1894 * - check modification order for contiguous subsequence
1895 * -> rf must be same thread as release */
1897 const ModelAction *fence_release = rf->get_last_fence_release();
1898 /* Synchronize with a fence-release unconditionally; we don't need to
1899 * find any more "contiguous subsequence..." for it */
1901 release_heads->push_back(fence_release);
1903 int tid = id_to_int(rf->get_tid());
1904 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1905 action_list_t *list = &(*thrd_lists)[tid];
1906 action_list_t::const_reverse_iterator rit;
1908 /* Find rf in the thread list */
1909 rit = std::find(list->rbegin(), list->rend(), rf);
1910 ASSERT(rit != list->rend());
1912 /* Find the last {write,fence}-release */
1913 for (; rit != list->rend(); rit++) {
1914 if (fence_release && *(*rit) < *fence_release)
1916 if ((*rit)->is_release())
1919 if (rit == list->rend()) {
1920 /* No write-release in this thread */
1921 return true; /* complete */
1922 } else if (fence_release && *(*rit) < *fence_release) {
1923 /* The fence-release is more recent (and so, "stronger") than
1924 * the most recent write-release */
1925 return true; /* complete */
1926 } /* else, need to establish contiguous release sequence */
1927 ModelAction *release = *rit;
1929 ASSERT(rf->same_thread(release));
1931 pending->writes.clear();
1933 bool certain = true;
1934 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1935 if (id_to_int(rf->get_tid()) == (int)i)
1937 list = &(*thrd_lists)[i];
1939 /* Can we ensure no future writes from this thread may break
1940 * the release seq? */
1941 bool future_ordered = false;
1943 ModelAction *last = get_last_action(int_to_id(i));
1944 Thread *th = get_thread(int_to_id(i));
1945 if ((last && rf->happens_before(last)) ||
1948 future_ordered = true;
1950 ASSERT(!th->is_model_thread() || future_ordered);
1952 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1953 const ModelAction *act = *rit;
1954 /* Reach synchronization -> this thread is complete */
1955 if (act->happens_before(release))
1957 if (rf->happens_before(act)) {
1958 future_ordered = true;
1962 /* Only non-RMW writes can break release sequences */
1963 if (!act->is_write() || act->is_rmw())
1966 /* Check modification order */
1967 if (mo_graph->checkReachable(rf, act)) {
1968 /* rf --mo--> act */
1969 future_ordered = true;
1972 if (mo_graph->checkReachable(act, release))
1973 /* act --mo--> release */
1975 if (mo_graph->checkReachable(release, act) &&
1976 mo_graph->checkReachable(act, rf)) {
1977 /* release --mo-> act --mo--> rf */
1978 return true; /* complete */
1980 /* act may break release sequence */
1981 pending->writes.push_back(act);
1984 if (!future_ordered)
1985 certain = false; /* This thread is uncertain */
1989 release_heads->push_back(release);
1990 pending->writes.clear();
1992 pending->release = release;
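/*
 * Illustration (added sketch) of the contiguity check above: RMWs extend a
 * release sequence, while a plain store by another thread can break it.
 *
 *   std::atomic<int> x(0);
 *
 *   // T1: x.store(1, std::memory_order_release);      // release head
 *   // T2: x.fetch_add(1, std::memory_order_relaxed);  // RMW: stays in the
 *   //                                                 // release sequence
 *   // T3: if (x.load(std::memory_order_acquire) == 2)
 *   //         ;   // synchronizes with T1's store, through the RMW
 *
 *   // If instead another thread's plain relaxed store were ordered between
 *   // the head and the RMW in modification order, the sequence would be
 *   // broken and no synchronization with T1 would be established.
 */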
1999 * An interface for getting the release sequence head(s) with which a
2000 * given ModelAction must synchronize. This function only returns a non-empty
2001 * result when it can locate a release sequence head with certainty. Otherwise,
2002 * it may mark the internal state of the ModelChecker so that it will handle
2003 * the release sequence at a later time, causing @a acquire to update its
2004 * synchronization at some later point in execution.
2006 * @param acquire The 'acquire' action that may synchronize with a release
2008 * @param read The read action that may read from a release sequence; this may
2009 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2010 * when 'acquire' is a fence-acquire)
2011 * @param release_heads A pass-by-reference return parameter. Will be filled
2012 * with the head(s) of the release sequence(s), if they exist with certainty.
2013 * @see ModelChecker::release_seq_heads
2015 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2016 ModelAction *read, rel_heads_list_t *release_heads)
2018 const ModelAction *rf = read->get_reads_from();
2019 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2020 sequence->acquire = acquire;
2021 sequence->read = read;
2023 if (!release_seq_heads(rf, release_heads, sequence)) {
2024 /* add act to 'lazy checking' list */
2025 pending_rel_seqs->push_back(sequence);
2027 snapshot_free(sequence);
2032 * Attempt to resolve all stashed operations that might synchronize with a
2033 * release sequence for a given location. This implements the "lazy" portion of
2034 * determining whether or not a release sequence was contiguous, since not all
2035 * modification order information is present at the time an action occurs.
2037 * @param location The location/object that should be checked for release
2038 * sequence resolutions. A NULL value means to check all locations.
2039 * @param work_queue The work queue to which to add work items as they are generated
2041 * @return True if any updates occurred (new synchronization, new mo_graph edges)
2044 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2046 bool updated = false;
2047 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2048 while (it != pending_rel_seqs->end()) {
2049 struct release_seq *pending = *it;
2050 ModelAction *acquire = pending->acquire;
2051 const ModelAction *read = pending->read;
2053 /* Only resolve sequences on the given location, if provided */
2054 if (location && read->get_location() != location) {
2059 const ModelAction *rf = read->get_reads_from();
2060 rel_heads_list_t release_heads;
2062 complete = release_seq_heads(rf, &release_heads, pending);
2063 for (unsigned int i = 0; i < release_heads.size(); i++) {
2064 if (!acquire->has_synchronized_with(release_heads[i])) {
2065 if (acquire->synchronize_with(release_heads[i]))
2068 set_bad_synchronization();
2073 /* Re-check all pending release sequences */
2074 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2075 /* Re-check read-acquire for mo_graph edges */
2076 if (acquire->is_read())
2077 work_queue->push_back(MOEdgeWorkEntry(acquire));
2079 /* propagate synchronization to later actions */
2080 action_list_t::reverse_iterator rit = action_trace->rbegin();
2081 for (; (*rit) != acquire; rit++) {
2082 ModelAction *propagate = *rit;
2083 if (acquire->happens_before(propagate)) {
2084 propagate->synchronize_with(acquire);
2085 /* Re-check 'propagate' for mo_graph edges */
2086 work_queue->push_back(MOEdgeWorkEntry(propagate));
2091 it = pending_rel_seqs->erase(it);
2092 snapshot_free(pending);
2098 // If we resolved promises or data races, see if we have realized a data race.
2105 * Performs various bookkeeping operations for the current ModelAction. For
2106 * instance, adds the action to the per-object, per-thread action vector and to the
2107 * action trace list of all thread actions.
2109 * @param act is the ModelAction to add.
2111 void ModelChecker::add_action_to_lists(ModelAction *act)
2113 int tid = id_to_int(act->get_tid());
2114 ModelAction *uninit = NULL;
2116 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2117 if (list->empty() && act->is_atomic_var()) {
2118 uninit = new_uninitialized_action(act->get_location());
2119 uninit_id = id_to_int(uninit->get_tid());
2120 list->push_back(uninit);
2122 list->push_back(act);
2124 action_trace->push_back(act);
2126 action_trace->push_front(uninit);
2128 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2129 if (tid >= (int)vec->size())
2130 vec->resize(priv->next_thread_id);
2131 (*vec)[tid].push_back(act);
2133 (*vec)[uninit_id].push_front(uninit);
2135 if ((int)thrd_last_action->size() <= tid)
2136 thrd_last_action->resize(get_num_threads());
2137 (*thrd_last_action)[tid] = act;
2139 (*thrd_last_action)[uninit_id] = uninit;
2141 if (act->is_fence() && act->is_release()) {
2142 if ((int)thrd_last_fence_release->size() <= tid)
2143 thrd_last_fence_release->resize(get_num_threads());
2144 (*thrd_last_fence_release)[tid] = act;
2147 if (act->is_wait()) {
2148 void *mutex_loc = (void *) act->get_value();
2149 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2151 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2152 if (tid >= (int)vec->size())
2153 vec->resize(priv->next_thread_id);
2154 (*vec)[tid].push_back(act);
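/*
 * Note (illustrative, hypothetical user code): a condition-variable wait such as
 *
 *   cv.wait(m);   // modeled as an ATOMIC_WAIT whose value field holds the mutex address
 *
 * is therefore indexed twice: once under the condition variable's own location
 * and once under the associated mutex. Because wait() implicitly unlocks the
 * mutex, later mutex analysis (e.g., get_last_unlock(), which accepts is_wait()
 * alongside is_unlock()) can treat the wait action as the most recent unlock.
 */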
2159 * @brief Get the last action performed by a particular Thread
2160 * @param tid The thread ID of the Thread in question
2161 * @return The last action in the thread
2163 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2165 int threadid = id_to_int(tid);
2166 if (threadid < (int)thrd_last_action->size())
2167 return (*thrd_last_action)[id_to_int(tid)];
2173 * @brief Get the last fence release performed by a particular Thread
2174 * @param tid The thread ID of the Thread in question
2175 * @return The last fence release in the thread, if one exists; NULL otherwise
2177 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2179 int threadid = id_to_int(tid);
2180 if (threadid < (int)thrd_last_fence_release->size())
2181 return (*thrd_last_fence_release)[id_to_int(tid)];
2187 * Gets the last memory_order_seq_cst write (in the total global sequence)
2188 * performed on a particular object (i.e., memory location), not including the
2190 * @param curr The current ModelAction; also denotes the object location to check
2192 * @return The last seq_cst write
2194 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2196 void *location = curr->get_location();
2197 action_list_t *list = get_safe_ptr_action(obj_map, location);
2198 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2199 action_list_t::reverse_iterator rit;
2200 for (rit = list->rbegin(); rit != list->rend(); rit++)
2201 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2207 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2208 * performed in a particular thread, prior to a particular fence.
2209 * @param tid The ID of the thread to check
2210 * @param before_fence The fence from which to begin the search; if NULL, then
2211 * search for the most recent fence in the thread.
2212 * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2214 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2216 /* All fences should have NULL location */
2217 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2218 action_list_t::reverse_iterator rit = list->rbegin();
2221 for (; rit != list->rend(); rit++)
2222 if (*rit == before_fence)
2225 ASSERT(*rit == before_fence);
2229 for (; rit != list->rend(); rit++)
2230 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2236 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2237 * location). This function identifies the mutex according to the current
2238 * action, which is presumed to operate on the same mutex.
2239 * @param curr The current ModelAction; also denotes the object location to check
2241 * @return The last unlock operation
2243 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2245 void *location = curr->get_location();
2246 action_list_t *list = get_safe_ptr_action(obj_map, location);
2247 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2248 action_list_t::reverse_iterator rit;
2249 for (rit = list->rbegin(); rit != list->rend(); rit++)
2250 if ((*rit)->is_unlock() || (*rit)->is_wait())
2255 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2257 ModelAction *parent = get_last_action(tid);
2259 parent = get_thread(tid)->get_creation();
2264 * Returns the clock vector for a given thread.
2265 * @param tid The thread whose clock vector we want
2266 * @return Desired clock vector
2268 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2270 return get_parent_action(tid)->get_cv();
2274 * Resolve a set of Promises with a current write. The set is provided in the
2275 * Node corresponding to @a write.
2276 * @param write The ModelAction that is fulfilling Promises
2277 * @return True if promises were resolved; false otherwise
2279 bool ModelChecker::resolve_promises(ModelAction *write)
2281 bool haveResolved = false;
2282 std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2283 promise_list_t mustResolve, resolved;
2285 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2286 Promise *promise = (*promises)[promise_index];
2287 if (write->get_node()->get_promise(i)) {
2288 ModelAction *read = promise->get_action();
2289 read_from(read, write);
2290 //Make sure the promise's value matches the write's value
2291 ASSERT(promise->is_compatible(write));
2292 mo_graph->resolvePromise(read, write, &mustResolve);
2294 resolved.push_back(promise);
2295 promises->erase(promises->begin() + promise_index);
2296 actions_to_check.push_back(read);
2298 haveResolved = true;
2303 for (unsigned int i = 0; i < mustResolve.size(); i++) {
2304 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2306 priv->failed_promise = true;
2308 for (unsigned int i = 0; i < resolved.size(); i++)
2310 //Check whether reading these writes has made threads unable to resolve promises
2313 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2314 ModelAction *read = actions_to_check[i];
2315 mo_check_promises(read, true);
2318 return haveResolved;
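/*
 * Background sketch (hypothetical user code): a Promise stands in for a write
 * that has not been explored yet. For
 *
 *   std::atomic<int> x(0);
 *   // Thread 1                                  // Thread 2
 *   r1 = x.load(std::memory_order_relaxed);      x.store(42, std::memory_order_relaxed);
 *
 * the checker may speculatively let Thread 1's load return 42 before Thread 2's
 * store has been executed; the load then carries a Promise for that value. Once
 * the store is actually performed, resolve_promises() binds it to the Promise
 * via read_from(), checks compatibility, and drops the Promise from the pending
 * list.
 */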
2322 * Compute the set of promises that could potentially be satisfied by this
2323 * action. Note that the set computation actually appears in the Node, not in the ModelChecker.
2325 * @param curr The ModelAction that may satisfy promises
2327 void ModelChecker::compute_promises(ModelAction *curr)
2329 for (unsigned int i = 0; i < promises->size(); i++) {
2330 Promise *promise = (*promises)[i];
2331 const ModelAction *act = promise->get_action();
2332 if (!act->happens_before(curr) &&
2334 !act->could_synchronize_with(curr) &&
2335 !act->same_thread(curr) &&
2336 act->get_location() == curr->get_location() &&
2337 promise->get_value() == curr->get_value()) {
2338 curr->get_node()->set_promise(i, act->is_rmw());
2343 /** Checks promises in response to a change in a thread's ClockVector. */
2344 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2346 for (unsigned int i = 0; i < promises->size(); i++) {
2347 Promise *promise = (*promises)[i];
2348 const ModelAction *act = promise->get_action();
2349 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2350 merge_cv->synchronized_since(act)) {
2351 if (promise->eliminate_thread(tid)) {
2352 //Promise has failed
2353 priv->failed_promise = true;
2360 void ModelChecker::check_promises_thread_disabled()
2362 for (unsigned int i = 0; i < promises->size(); i++) {
2363 Promise *promise = (*promises)[i];
2364 if (promise->has_failed()) {
2365 priv->failed_promise = true;
2372 * @brief Checks promises in response to addition to modification order for
2377 * pthread is the thread that performed the read that created the promise
2379 * pread is the read that created the promise
2381 * pwrite is either the first write to the same location as pread by
2382 * pthread that is sequenced after pread or the write read by the
2383 * first read to the same location as pread by pthread that is
2384 * sequenced after pread.
2386 * 1. If tid=pthread, then we check what other threads are reachable
2387 * through the mod order starting with pwrite. Those threads cannot
2388 * perform a write that will resolve the promise due to modification
2389 * order constraints.
2391 * 2. If the tid is not pthread, we check whether pwrite can reach the
2392 * action write through the modification order. If so, that thread
2393 * cannot perform a future write that will resolve the promise due to
2394 * modification order constraints.
2396 * @param act The ModelAction under consideration: either a read that reads
2397 * from a promised write, or the relevant write itself.
2399 * @param is_read_check True if @a act is the read; false if @a act is the
2400 * write.
2402 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2404 thread_id_t tid = act->get_tid();
2405 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2407 for (unsigned int i = 0; i < promises->size(); i++) {
2408 Promise *promise = (*promises)[i];
2409 const ModelAction *pread = promise->get_action();
2411 // Is this promise on the same location?
2412 if (!pread->same_var(write))
2415 // same thread as pread
2416 if (pread->get_tid() == tid) {
2417 // make sure that the reader of this write happens after the promise
2418 if (!is_read_check || (pread->happens_before(act))) {
2419 // if we don't yet have a pwrite for this promise, set it
2420 if (promise->get_write() == NULL) {
2421 promise->set_write(write);
2422 // The pwrite cannot happen before pread
2423 if (write->happens_before(pread) && (write != pread)) {
2424 priv->failed_promise = true;
2429 if (mo_graph->checkPromise(write, promise)) {
2430 priv->failed_promise = true;
2436 // Don't do any lookups twice for the same thread
2437 if (!promise->thread_is_available(tid))
2440 const ModelAction *pwrite = promise->get_write();
2441 if (pwrite && mo_graph->checkReachable(pwrite, write)) {
2442 if (promise->eliminate_thread(tid)) {
2443 priv->failed_promise = true;
2451 * Compute the set of writes that may break the current pending release
2452 * sequence. This information is extracted from previous release sequence
2455 * @param curr The current ModelAction. Must be a release sequence fixup action.
2458 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2460 if (pending_rel_seqs->empty())
2463 struct release_seq *pending = pending_rel_seqs->back();
2464 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2465 const ModelAction *write = pending->writes[i];
2466 curr->get_node()->add_relseq_break(write);
2469 /* NULL means don't break the sequence; just synchronize */
2470 curr->get_node()->add_relseq_break(NULL);
2474 * Build up an initial set of all past writes that this 'read' action may read
2475 * from. This set is determined by the clock vector's "happens before" relationship.
2477 * @param curr is the current ModelAction that we are exploring; it must be a read operation.
2480 void ModelChecker::build_reads_from_past(ModelAction *curr)
2482 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2484 ASSERT(curr->is_read());
2486 ModelAction *last_sc_write = NULL;
2488 if (curr->is_seqcst())
2489 last_sc_write = get_last_seq_cst_write(curr);
2491 /* Iterate over all threads */
2492 for (i = 0; i < thrd_lists->size(); i++) {
2493 /* Iterate over actions in thread, starting from most recent */
2494 action_list_t *list = &(*thrd_lists)[i];
2495 action_list_t::reverse_iterator rit;
2496 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2497 ModelAction *act = *rit;
2499 /* Only consider 'write' actions */
2500 if (!act->is_write() || act == curr)
2503 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2504 bool allow_read = true;
2506 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2508 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2512 curr->get_node()->add_read_from(act);
2514 /* Include at most one act per-thread that "happens before" curr */
2515 if (act->happens_before(curr))
2520 if (DBG_ENABLED()) {
2521 model_print("Reached read action:\n");
2523 model_print("Printing may_read_from\n");
2524 curr->get_node()->print_may_read_from();
2525 model_print("End printing may_read_from\n");
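/*
 * Worked example (hypothetical user code):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1                                // Thread 2
 *   x.store(1, std::memory_order_relaxed);     r1 = x.load(std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);
 *
 * With no synchronization, neither store happens-before the load, so the load's
 * may_read_from set contains the initial value of x as well as both stores. If
 * instead the store of 2 happened-before the load (say, via a release/acquire
 * handoff), the per-thread scan would stop at that store and keep only it from
 * Thread 1, since at most one happens-before write per thread is retained. A
 * seq_cst load is further restricted: it may not read from a seq_cst write
 * other than the most recent one, nor from any write that happens before that
 * most recent seq_cst write.
 */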
2529 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2532 /* UNINIT actions don't have a Node, and they never sleep */
2533 if (write->is_uninitialized())
2535 Node *prevnode = write->get_node()->get_parent();
2537 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2538 if (write->is_release() && thread_sleep)
2540 if (!write->is_rmw()) {
2543 if (write->get_reads_from() == NULL)
2545 write = write->get_reads_from();
2550 * @brief Create a new action representing an uninitialized atomic
2551 * @param location The memory location of the atomic object
2552 * @return A pointer to a new ModelAction
2554 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2556 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2557 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2558 act->create_cv(NULL);
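/*
 * The allocation above is ordinary placement new: grab raw storage, then
 * construct the object in place. A standalone sketch of the same idiom:
 *
 *   #include <new>
 *   #include <cstdlib>
 *
 *   void *buf = malloc(sizeof(Foo));
 *   Foo *f = new (buf) Foo();   // construct Foo inside buf
 *   f->~Foo();                  // destroy explicitly...
 *   free(buf);                  // ...before releasing the raw storage
 *
 * Here snapshot_malloc() stands in for malloc(), placing the fabricated UNINIT
 * action in snapshot-managed memory.
 */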
2562 static void print_list(action_list_t *list)
2564 action_list_t::iterator it;
2566 model_print("---------------------------------------------------------------------\n");
2568 unsigned int hash = 0;
2570 for (it = list->begin(); it != list->end(); it++) {
2572 hash = hash^(hash<<3)^((*it)->hash());
2574 model_print("HASH %u\n", hash);
2575 model_print("---------------------------------------------------------------------\n");
2578 #if SUPPORT_MOD_ORDER_DUMP
2579 void ModelChecker::dumpGraph(char *filename) const
2582 sprintf(buffer, "%s.dot", filename);
2583 FILE *file = fopen(buffer, "w");
2584 fprintf(file, "digraph %s {\n", filename);
2585 mo_graph->dumpNodes(file);
2586 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2588 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2589 ModelAction *action = *it;
2590 if (action->is_read()) {
2591 fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2592 if (action->get_reads_from() != NULL)
2593 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2595 if (thread_array[action->get_tid()] != NULL) {
2596 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2599 thread_array[action->get_tid()] = action;
2601 fprintf(file, "}\n");
2602 model_free(thread_array);
2607 /** @brief Prints an execution trace summary. */
2608 void ModelChecker::print_summary() const
2610 #if SUPPORT_MOD_ORDER_DUMP
2611 char buffername[100];
2612 sprintf(buffername, "exec%04u", stats.num_total);
2613 mo_graph->dumpGraphToFile(buffername);
2614 sprintf(buffername, "graph%04u", stats.num_total);
2615 dumpGraph(buffername);
2618 model_print("Execution %d:", stats.num_total);
2619 if (isfeasibleprefix())
2622 print_infeasibility(" INFEASIBLE");
2623 print_list(action_trace);
2628 * Add a Thread to the system for the first time. Should only be called once per thread.
2630 * @param t The Thread to add
2632 void ModelChecker::add_thread(Thread *t)
2634 thread_map->put(id_to_int(t->get_id()), t);
2635 scheduler->add_thread(t);
2639 * Removes a thread from the scheduler.
2640 * @param t The Thread to remove.
2642 void ModelChecker::remove_thread(Thread *t)
2644 scheduler->remove_thread(t);
2648 * @brief Get a Thread reference by its ID
2649 * @param tid The Thread's ID
2650 * @return A Thread reference
2652 Thread * ModelChecker::get_thread(thread_id_t tid) const
2654 return thread_map->get(id_to_int(tid));
2658 * @brief Get a reference to the Thread in which a ModelAction was executed
2659 * @param act The ModelAction
2660 * @return A Thread reference
2662 Thread * ModelChecker::get_thread(const ModelAction *act) const
2664 return get_thread(act->get_tid());
2668 * @brief Check if a Thread is currently enabled
2669 * @param t The Thread to check
2670 * @return True if the Thread is currently enabled
2672 bool ModelChecker::is_enabled(Thread *t) const
2674 return scheduler->is_enabled(t);
2678 * @brief Check if a Thread is currently enabled
2679 * @param tid The ID of the Thread to check
2680 * @return True if the Thread is currently enabled
2682 bool ModelChecker::is_enabled(thread_id_t tid) const
2684 return scheduler->is_enabled(tid);
2688 * Switch from a user-context to the "master thread" context (a.k.a. system
2689 * context). This switch is made with the intention of exploring a particular
2690 * model-checking action (described by a ModelAction object). Must be called
2691 * from a user-thread context.
2693 * @param act The current action that will be explored. May be NULL only if
2694 * trace is exiting via an assertion (see ModelChecker::set_assert and
2695 * ModelChecker::has_asserted).
2696 * @return The value returned by the current action
2698 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2701 Thread *old = thread_current();
2702 set_current_action(act);
2703 old->set_state(THREAD_READY);
2704 if (Thread::swap(old, &system_context) < 0) {
2705 perror("swap threads");
2708 return old->get_return_value();
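/*
 * Usage sketch (assumed, simplified): the instrumented atomic operations funnel
 * into this switch; e.g., a load wrapper along these lines
 *
 *   uint64_t model_read_action(void *obj, memory_order ord)
 *   {
 *       return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 *
 * hands control to the system context, which runs take_step()/check_current_action()
 * and eventually resumes the thread with the chosen read value as the return.
 */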
2712 * Takes the next step in the execution, if possible.
2713 * @param curr The current step to take
2714 * @return True if a step was taken; false otherwise.
2716 bool ModelChecker::take_step(ModelAction *curr)
2721 Thread *curr_thrd = get_thread(curr);
2722 ASSERT(curr_thrd->get_state() == THREAD_READY);
2724 curr = check_current_action(curr);
2726 /* Infeasible -> don't take any more steps */
2727 if (is_infeasible())
2729 else if (isfeasibleprefix() && have_bug_reports()) {
2734 if (params.bound != 0)
2735 if (priv->used_sequence_numbers > params.bound)
2738 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2739 scheduler->remove_thread(curr_thrd);
2741 Thread *next_thrd = get_next_thread(curr);
2742 next_thrd = scheduler->next_thread(next_thrd);
2744 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2745 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2748 * Launch end-of-execution release sequence fixups only when there are:
2750 * (1) no more user threads to run (or when execution replay chooses
2751 * the 'model_thread')
2752 * (2) pending release sequences
2753 * (3) pending assertions (i.e., data races)
2754 * (4) no pending promises
2756 if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2757 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2758 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2759 pending_rel_seqs->size());
2760 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2761 std::memory_order_seq_cst, NULL, VALUE_NONE,
2763 set_current_action(fixup);
2767 /* next_thrd == NULL -> don't take any more steps */
2771 next_thrd->set_state(THREAD_RUNNING);
2773 if (next_thrd->get_pending() != NULL) {
2774 /* restart a pending action */
2775 set_current_action(next_thrd->get_pending());
2776 next_thrd->set_pending(NULL);
2777 next_thrd->set_state(THREAD_READY);
2781 /* Return false only if swap fails with an error */
2782 return (Thread::swap(&system_context, next_thrd) == 0);
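/*
 * Control-flow sketch of the handshake with user threads:
 *
 *   user thread                          system context (this function)
 *   -----------                          -------------------------------
 *   switch_to_master(act)    ------->    take_step(act)
 *                                          check_current_action(act)
 *                                          choose next_thrd via the Scheduler
 *   resume, get return value <-------    Thread::swap(&system_context, next_thrd)
 *
 * Each user-visible operation thus costs two context switches: one into the
 * model checker and one back out to whichever thread runs next.
 */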
2785 /** Wrapper to run the user's main function, with appropriate arguments */
2786 void user_main_wrapper(void *)
2788 user_main(model->params.argc, model->params.argv);
2791 /** @brief Run ModelChecker for the user program */
2792 void ModelChecker::run()
2796 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2800 /* Run user thread up to its first action */
2801 scheduler->next_thread(t);
2802 Thread::swap(&system_context, t);
2804 /* Wait for all threads to complete */
2805 while (take_step(priv->current_action));
2806 } while (next_execution());
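/*
 * Driver sketch (assumed, simplified; setup details omitted): the program's
 * real entry point constructs the checker and lets run() iterate executions.
 *
 *   struct model_params params;          // populated from the command line
 *   model = new ModelChecker(params);
 *   model->run();                        // take_step()/next_execution() loop above
 */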