10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
// Format " [BUG] <str>\n" into a snapshot-allocated buffer so the message
// survives snapshot rollback; freed by the destructor.
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
// "%s" (2 chars) in fmt is replaced by str, so strlen(fmt) + strlen(str)
// also covers the terminating NUL byte.
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
// Release the snapshot-allocated message buffer, if one was created.
29 ~bug_message() { if (msg) snapshot_free(msg); }
// Print the stored bug message verbatim via the model-checker's printer.
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
// Reset all per-execution bookkeeping; these fields live in snapshotted
// memory and roll back together with the user program's state.
41 model_snapshot_members() :
42 /* First thread created will have id INITIAL_THREAD_ID */
43 next_thread_id(INITIAL_THREAD_ID),
44 used_sequence_numbers(0),
48 failed_promise(false),
49 too_many_reads(false),
50 no_valid_reads(false),
51 bad_synchronization(false),
// Free the bug_message objects accumulated during this execution.
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
// ID to assign to the next created thread; doubles as the thread count.
61 unsigned int next_thread_id;
// Highest sequence number handed out so far in this execution.
62 modelclock_t used_sequence_numbers;
// Latest cached backtracking point for this execution (may be NULL).
63 ModelAction *next_backtrack;
// Bug reports raised during this execution (snapshot-allocated).
64 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
// Cross-execution statistics; see struct execution_stats.
65 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
// Global trace of all actions, plus per-object / per-(object,thread) indexes.
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
// Threads blocked on a mutex / condition variable, keyed by object address.
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
// Promise / future-value / release-sequence bookkeeping (snapshot-allocated).
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
// Sized 1 so the model thread's slot exists from the start.
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
// Snapshotted scalar members live behind one pointer; see model_snapshot_members.
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
// Tear down all owned structures: threads first, then the per-object maps
// and the promise/release-sequence vectors.
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
// Promises are owned by this vector; free each element before the container.
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
// Look up the action list for `ptr`, lazily creating (and inserting) an empty
// list on first access so callers never see a NULL list.
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
// Same lazy-create lookup as get_safe_ptr_action, but for the per-thread
// vector-of-lists index (obj_thrd_map).
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
// Rewind the exploration tree to the beginning of the execution.
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
163 * FIXME: if we utilize partial rollback, we will need to free only
164 * those pending actions which were NOT pending before the rollback
167 for (unsigned int i = 0; i < get_num_threads(); i++)
168 delete get_thread(int_to_id(i))->get_pending();
// Roll user-program memory back to the initial snapshot (index 0).
170 snapshot_backtrack_before(0);
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
// Post-increment: returns the current ID and reserves it.
176 return priv->next_thread_id++;
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
// next_thread_id counts every ID handed out, so it equals the thread count.
182 return priv->next_thread_id;
186 * Must be called from user-thread context (e.g., through the global
187 * thread_current() interface)
189 * @return The currently executing Thread.
// Delegate to the scheduler, which tracks the currently executing Thread.
191 Thread * ModelChecker::get_current_thread() const
193 return scheduler->get_current_thread();
196 /** @return a sequence number for a new ModelAction */
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
// Pre-increment: sequence numbers start above the initial value.
199 return ++priv->used_sequence_numbers;
// The head of the NodeStack corresponds to the current exploration point.
202 Node * ModelChecker::get_curr_node() const
204 return node_stack->get_head();
208 * @brief Choose the next thread to execute.
210 * This function chooses the next thread that should execute. It can force the
211 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213 * The model-checker may have no preference regarding the next thread (i.e.,
214 * when exploring a new execution ordering), in which case we defer to the
217 * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218 * might guide the choice of next thread (i.e., THREAD_CREATE should be
219 * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220 * @return The next chosen thread to run, if any exist. Or else if no threads
221 * remain to be executed, return NULL.
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
228 /* Do not split atomic actions. */
230 return get_thread(curr);
// THREAD_CREATE must be immediately followed by the new thread's START.
231 else if (curr->get_type() == THREAD_CREATE)
232 return curr->get_thread_operand();
236 * Have we completed exploring the preselected path? Then let the
240 return scheduler->select_next_thread();
242 /* Else, we are trying to replay an execution */
243 ModelAction *next = node_stack->get_next()->get_action();
245 if (next == diverge) {
// Track the earliest divergence point seen since the last feasible execution.
246 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247 earliest_diverge = diverge;
249 Node *nextnode = next->get_node();
250 Node *prevnode = nextnode->get_parent();
251 scheduler->update_sleep_set(prevnode);
253 /* Reached divergence point */
// Exhaust this node's alternatives in priority order: misc index, promise
// set, read-from choice, future value, release-sequence break. Whichever
// increments first re-runs the same thread with the stack popped to depth 2.
254 if (nextnode->increment_misc()) {
255 /* The next node will try to satisfy a different misc_index values. */
256 tid = next->get_tid();
257 node_stack->pop_restofstack(2);
258 } else if (nextnode->increment_promise()) {
259 /* The next node will try to satisfy a different set of promises. */
260 tid = next->get_tid();
261 node_stack->pop_restofstack(2);
262 } else if (nextnode->increment_read_from()) {
263 /* The next node will read from a different value. */
264 tid = next->get_tid();
265 node_stack->pop_restofstack(2);
266 } else if (nextnode->increment_future_value()) {
267 /* The next node will try to read from a different future value. */
268 tid = next->get_tid();
269 node_stack->pop_restofstack(2);
270 } else if (nextnode->increment_relseq_break()) {
271 /* The next node will try to resolve a release sequence differently */
272 tid = next->get_tid();
273 node_stack->pop_restofstack(2);
// All alternatives exhausted: backtrack to the parent node and run the
// thread recorded there instead, putting the current thread to sleep.
276 /* Make a different thread execute for next step */
277 scheduler->add_sleep(get_thread(next->get_tid()));
278 tid = prevnode->get_next_backtrack();
279 /* Make sure the backtracked thread isn't sleeping. */
280 node_stack->pop_restofstack(1);
281 if (diverge == earliest_diverge) {
282 earliest_diverge = prevnode->get_action();
285 /* The correct sleep set is in the parent node. */
288 DEBUG("*** Divergence point ***\n");
// Not at the divergence point yet: replay the recorded thread.
292 tid = next->get_tid();
294 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
295 ASSERT(tid != THREAD_ID_T_NONE);
296 return thread_map->get(id_to_int(tid));
300 * We need to know what the next actions of all threads in the sleep
301 * set will be. This method computes them and stores the actions at
302 * the corresponding thread object's pending action.
305 void ModelChecker::execute_sleep_set()
// Mark the pending action of every sleep-set thread so later wake-up checks
// (could_synchronize_with) can consult it.
307 for (unsigned int i = 0; i < get_num_threads(); i++) {
308 thread_id_t tid = int_to_id(i);
309 Thread *thr = get_thread(tid);
310 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
311 thr->get_pending()->set_sleep_flag();
// Wake any sleep-set thread whose pending action could synchronize with
// `curr` — its next step is no longer provably redundant.
316 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
318 for (unsigned int i = 0; i < get_num_threads(); i++) {
319 Thread *thr = get_thread(int_to_id(i));
320 if (scheduler->is_sleep_set(thr)) {
321 ModelAction *pending_act = thr->get_pending();
// RMW-reads are skipped; the write part of the RMW is checked separately.
322 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
323 //Remove this thread from sleep set
324 scheduler->remove_sleep(thr);
329 /** @brief Alert the model-checker that an incorrectly-ordered
330 * synchronization was made */
// Record the condition in snapshotted state so it resets with each execution.
331 void ModelChecker::set_bad_synchronization()
333 priv->bad_synchronization = true;
337 * Check whether the current trace has triggered an assertion which should halt
340 * @return True, if the execution should be aborted; false otherwise
342 bool ModelChecker::has_asserted() const
// `asserted` is set via set_assert() when this execution must halt.
344 return priv->asserted;
348 * Trigger a trace assertion which should cause this execution to be halted.
349 * This can be due to a detected bug or due to an infeasibility that should
352 void ModelChecker::set_assert()
// Flag lives in snapshotted state; cleared automatically on rollback.
354 priv->asserted = true;
358 * Check if we are in a deadlock. Should only be called at the end of an
359 * execution, although it should not give false positives in the middle of an
360 * execution (there should be some ENABLED thread).
362 * @return True if program is in a deadlock; false otherwise
364 bool ModelChecker::is_deadlocked() const
366 bool blocking_threads = false;
// Deadlock here means: at least one user thread still has a pending action
// (it is blocked waiting) — see the elided check for enabled threads above.
367 for (unsigned int i = 0; i < get_num_threads(); i++) {
368 thread_id_t tid = int_to_id(i);
371 Thread *t = get_thread(tid);
// The special model-checker thread never counts as blocked.
372 if (!t->is_model_thread() && t->get_pending())
373 blocking_threads = true;
375 return blocking_threads;
379 * Check if this is a complete execution. That is, have all thread completed
380 * execution (rather than exiting because sleep sets have forced a redundant
383 * @return True if the execution is complete.
385 bool ModelChecker::is_complete_execution() const
// Complete means no thread is still enabled; an enabled thread implies the
// execution ended early (e.g., cut off by sleep sets).
387 for (unsigned int i = 0; i < get_num_threads(); i++)
388 if (is_enabled(int_to_id(i)))
394 * @brief Assert a bug in the executing program.
396 * Use this function to assert any sort of bug in the user program. If the
397 * current trace is feasible (actually, a prefix of some feasible execution),
398 * then this execution will be aborted, printing the appropriate message. If
399 * the current trace is not yet feasible, the error message will be stashed and
400 * printed if the execution ever becomes feasible.
402 * @param msg Descriptive message for the bug (do not include newline char)
403 * @return True if bug is immediately-feasible
405 bool ModelChecker::assert_bug(const char *msg)
// Stash the bug report; it is printed later, and only acted on immediately
// if the current trace is already a feasible prefix.
407 priv->bugs.push_back(new bug_message(msg));
409 if (isfeasibleprefix()) {
417 * @brief Assert a bug in the executing program, asserted by a user thread
418 * @see ModelChecker::assert_bug
419 * @param msg Descriptive message for the bug (do not include newline char)
421 void ModelChecker::assert_user_bug(const char *msg)
423 /* If feasible bug, bail out now */
// Called from user-thread context, so yield back to the model-checker
// (master) to halt this execution.
425 switch_to_master(NULL);
428 /** @return True, if any bugs have been reported for this execution */
428 /** @return True, if any bugs have been reported for this execution */
429 bool ModelChecker::have_bug_reports() const
431 return priv->bugs.size() != 0;
434 /** @brief Print bug report listing for this execution (if any bugs exist) */
434 /** @brief Print bug report listing for this execution (if any bugs exist) */
435 void ModelChecker::print_bugs() const
437 if (have_bug_reports()) {
// Header with count and correct pluralization, then one line per bug.
438 model_print("Bug report: %zu bug%s detected\n",
440 priv->bugs.size() > 1 ? "s" : "");
441 for (unsigned int i = 0; i < priv->bugs.size(); i++)
442 priv->bugs[i]->print();
447 * @brief Record end-of-execution stats
449 * Must be run when exiting an execution. Records various stats.
450 * @see struct execution_stats
452 void ModelChecker::record_stats()
// Classify the finished execution into exactly one bucket, checked in
// priority order: infeasible > buggy > complete > redundant.
455 if (!isfeasibleprefix())
456 stats.num_infeasible++;
457 else if (have_bug_reports())
458 stats.num_buggy_executions++;
459 else if (is_complete_execution())
460 stats.num_complete++;
// Feasible, bug-free, but cut short (e.g., by sleep sets): redundant.
462 stats.num_redundant++;
465 /** @brief Print execution stats */
465 /** @brief Print execution stats */
466 void ModelChecker::print_stats() const
468 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
469 model_print("Number of redundant executions: %d\n", stats.num_redundant);
470 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
471 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
472 model_print("Total executions: %d\n", stats.num_total);
473 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
477 * @brief End-of-execution print
478 * @param printbugs Should any existing bugs be printed?
480 void ModelChecker::print_execution(bool printbugs) const
// Always echo the buffered user-program output first.
482 print_program_output();
// In verbose/debug mode, also report the earliest divergence point.
484 if (DBG_ENABLED() || params.verbose) {
485 model_print("Earliest divergence point since last feasible execution:\n");
486 if (earliest_diverge)
487 earliest_diverge->print();
489 model_print("(Not set)\n");
495 /* Don't print invalid bugs */
495 /* Don't print invalid bugs */
504 * Queries the model-checker for more executions to explore and, if one
505 * exists, resets the model-checker state to execute a new execution.
507 * @return If there are more executions to explore, return true. Otherwise,
510 bool ModelChecker::next_execution()
513 /* Is this execution a feasible execution that's worth bug-checking? */
514 bool complete = isfeasibleprefix() && (is_complete_execution() ||
517 /* End-of-execution bug checks */
520 assert_bug("Deadlock detected");
// Print the execution when debugging, in verbose mode, or when a complete
// execution produced bug reports.
528 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
529 print_execution(complete);
// Discard buffered user output so it doesn't leak into the next execution.
531 clear_program_output();
534 earliest_diverge = NULL;
// No backtracking point left => exploration is finished.
536 if ((diverge = get_next_backtrack()) == NULL)
540 model_print("Next execution will diverge at:\n");
// Roll everything back and start the next execution.
544 reset_to_initial_state();
// Find the most recent prior action that conflicts with `act` (a potential
// backtracking target), dispatching on the action type.
548 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
550 switch (act->get_type()) {
555 /* Optimization: relaxed operations don't need backtracking */
556 if (act->is_relaxed())
558 /* linear search: from most recent to oldest */
559 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
560 action_list_t::reverse_iterator rit;
561 for (rit = list->rbegin(); rit != list->rend(); rit++) {
562 ModelAction *prev = *rit;
// Atomic read/write/RMW conflicts with anything it could synchronize with.
563 if (prev->could_synchronize_with(act))
569 case ATOMIC_TRYLOCK: {
570 /* linear search: from most recent to oldest */
571 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
572 action_list_t::reverse_iterator rit;
573 for (rit = list->rbegin(); rit != list->rend(); rit++) {
574 ModelAction *prev = *rit;
// Lock/trylock conflicts are defined by is_conflicting_lock().
575 if (act->is_conflicting_lock(prev))
580 case ATOMIC_UNLOCK: {
581 /* linear search: from most recent to oldest */
582 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
583 action_list_t::reverse_iterator rit;
584 for (rit = list->rbegin(); rit != list->rend(); rit++) {
585 ModelAction *prev = *rit;
// Unlock conflicts with another thread's failed trylock.
586 if (!act->same_thread(prev) && prev->is_failed_trylock())
592 /* linear search: from most recent to oldest */
593 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
594 action_list_t::reverse_iterator rit;
595 for (rit = list->rbegin(); rit != list->rend(); rit++) {
596 ModelAction *prev = *rit;
// Wait conflicts with another thread's failed trylock or notify.
597 if (!act->same_thread(prev) && prev->is_failed_trylock())
599 if (!act->same_thread(prev) && prev->is_notify())
605 case ATOMIC_NOTIFY_ALL:
606 case ATOMIC_NOTIFY_ONE: {
607 /* linear search: from most recent to oldest */
608 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
609 action_list_t::reverse_iterator rit;
610 for (rit = list->rbegin(); rit != list->rend(); rit++) {
611 ModelAction *prev = *rit;
// Notify conflicts with another thread's wait on the same condvar.
612 if (!act->same_thread(prev) && prev->is_wait())
623 /** This method finds backtracking points where we should try to
624 * reorder the parameter ModelAction against.
626 * @param act The ModelAction to find backtracking points for.
628 void ModelChecker::set_backtracking(ModelAction *act)
630 Thread *t = get_thread(act);
// Find the latest prior conflicting action; backtracking happens at the
// node *before* that conflict.
631 ModelAction *prev = get_last_conflict(act);
635 Node *node = prev->get_node()->get_parent();
// If act's thread was already enabled at that node, only that single thread
// needs a backtracking mark; otherwise consider every thread.
637 int low_tid, high_tid;
638 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
639 low_tid = id_to_int(act->get_tid());
640 high_tid = low_tid + 1;
643 high_tid = get_num_threads();
646 for (int i = low_tid; i < high_tid; i++) {
647 thread_id_t tid = int_to_id(i);
649 /* Make sure this thread can be enabled here. */
650 if (i >= node->get_num_threads())
653 /* Don't backtrack into a point where the thread is disabled or sleeping. */
654 if (node->enabled_status(tid) != THREAD_ENABLED)
657 /* Check if this has been explored already */
658 if (node->has_been_explored(tid))
661 /* See if fairness allows */
662 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
// Under a fairness window, only skip tid if some enabled thread does
// hold priority at this node (checked in the inner scan below).
664 for (int t = 0; t < node->get_num_threads(); t++) {
665 thread_id_t tother = int_to_id(t);
666 if (node->is_enabled(tother) && node->has_priority(tother)) {
674 /* Cache the latest backtracking point */
675 set_latest_backtrack(prev);
677 /* If this is a new backtracking point, mark the tree */
678 if (!node->set_backtrack(tid))
680 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
681 id_to_int(prev->get_tid()),
682 id_to_int(t->get_id()));
691 * @brief Cache the a backtracking point as the "most recent", if eligible
693 * Note that this does not prepare the NodeStack for this backtracking
694 * operation, it only caches the action on a per-execution basis
696 * @param act The operation at which we should explore a different next action
697 * (i.e., backtracking point)
698 * @return True, if this action is now the most recent backtracking point;
701 bool ModelChecker::set_latest_backtrack(ModelAction *act)
// Replace the cached point only if none exists or act is later in the trace
// (ModelAction ordering via operator>).
703 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
704 priv->next_backtrack = act;
711 * Returns last backtracking point. The model checker will explore a different
712 * path for this point in the next execution.
713 * @return The ModelAction at which the next execution should diverge.
715 ModelAction * ModelChecker::get_next_backtrack()
// Consume the cached backtracking point: return it and clear the cache.
717 ModelAction *next = priv->next_backtrack;
718 priv->next_backtrack = NULL;
723 * Processes a read or rmw model action.
724 * @param curr is the read model action to process.
725 * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
726 * @return True if processing this read updates the mo_graph.
728 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
730 uint64_t value = VALUE_NONE;
731 bool updated = false;
// The node records which write (if any) this read should read from on this
// exploration; NULL means try a promised future value instead.
733 const ModelAction *reads_from = curr->get_node()->get_read_from();
734 if (reads_from != NULL) {
// Stage mo_graph edits so they can be rolled back if infeasible.
735 mo_graph->startChanges();
737 value = reads_from->get_value();
738 bool r_status = false;
// The second half of an RMW reuses the first half's ordering work.
740 if (!second_part_of_rmw) {
741 check_recency(curr, reads_from);
742 r_status = r_modification_order(curr, reads_from);
// If this choice made the execution infeasible and alternatives remain,
// roll back the staged mo_graph changes and retry (elided control flow).
745 if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
746 mo_graph->rollbackChanges();
747 priv->too_many_reads = false;
// Commit: establish the rf edge, finalize mo_graph, re-check promises.
751 read_from(curr, reads_from);
752 mo_graph->commitChanges();
753 mo_check_promises(curr, true);
756 } else if (!second_part_of_rmw) {
757 /* Read from future value */
758 struct future_value fv = curr->get_node()->get_future_value();
759 Promise *promise = new Promise(curr, fv);
761 curr->set_read_from_promise(promise);
762 promises->push_back(promise);
763 mo_graph->startChanges();
764 updated = r_modification_order(curr, promise);
765 mo_graph->commitChanges();
// The value read becomes the thread's return value for this action.
767 get_thread(curr)->set_return_value(value);
773 * Processes a lock, trylock, or unlock model action. @param curr is
774 * the read model action to process.
776 * The try lock operation checks whether the lock is taken. If not,
777 * it falls to the normal lock operation case. If so, it returns
780 * The lock operation has already been checked that it is enabled, so
781 * it just grabs the lock and synchronizes with the previous unlock.
783 * The unlock operation has to re-enable all of the threads that are
784 * waiting on the lock.
786 * @return True if synchronization was updated; false otherwise
788 bool ModelChecker::process_mutex(ModelAction *curr)
790 std::mutex *mutex = NULL;
791 struct std::mutex_state *state = NULL;
// Locate the mutex: lock/trylock/unlock carry it as the action's location,
// while wait carries it as the action's value.
793 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
794 mutex = (std::mutex *)curr->get_location();
795 state = mutex->get_state();
796 } else if (curr->is_wait()) {
797 mutex = (std::mutex *)curr->get_value();
798 state = mutex->get_state();
801 switch (curr->get_type()) {
802 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently unlocked; record the
// outcome on the action and as the thread's return value.
803 bool success = !state->islocked;
804 curr->set_try_lock(success);
806 get_thread(curr)->set_return_value(0);
809 get_thread(curr)->set_return_value(1);
811 //otherwise fall into the lock case
// Lock case: detect use of the mutex before its initialization by
// comparing clocks against the allocation point.
813 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
814 assert_bug("Lock access before initialization");
815 state->islocked = true;
816 ModelAction *unlock = get_last_unlock(curr);
817 //synchronize with the previous unlock statement
818 if (unlock != NULL) {
819 curr->synchronize_with(unlock);
824 case ATOMIC_UNLOCK: {
826 state->islocked = false;
827 //wake up the other threads
828 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
829 //activate all the waiting threads
830 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
831 scheduler->wake(get_thread(*rit));
// Wait case (elided label): release the mutex, wake its waiters, then
// decide (via the node's misc index) whether this wait actually blocks —
// this models spurious wakeup non-deterministically.
838 state->islocked = false;
839 //wake up the other threads
840 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
841 //activate all the waiting threads
842 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
843 scheduler->wake(get_thread(*rit));
846 //check whether we should go to sleep or not...simulate spurious failures
847 if (curr->get_node()->get_misc() == 0) {
848 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
850 scheduler->sleep(get_thread(curr));
854 case ATOMIC_NOTIFY_ALL: {
855 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
856 //activate all the waiting threads
857 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
858 scheduler->wake(get_thread(*rit));
863 case ATOMIC_NOTIFY_ONE: {
// The misc index chooses WHICH waiter to wake, so all choices get explored.
864 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
865 int wakeupthread = curr->get_node()->get_misc();
866 action_list_t::iterator it = waiters->begin();
867 advance(it, wakeupthread);
868 scheduler->wake(get_thread(*it));
// Offer the writer's value to the reader as a "future value" (a value the
// read could observe in some other interleaving), if mo allows it.
879 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
881 /* Do more ambitious checks now that mo is more complete */
882 if (mo_may_allow(writer, reader)) {
883 Node *node = reader->get_node();
885 /* Find an ancestor thread which exists at the time of the reader */
886 Thread *write_thread = get_thread(writer);
887 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
888 write_thread = write_thread->get_parent();
890 struct future_value fv = {
// Expiration: the value stays available for maxfuturedelay steps.
892 writer->get_seq_number() + params.maxfuturedelay,
893 write_thread->get_id(),
// A newly-added future value is a fresh choice => new backtracking point.
895 if (node->add_future_value(fv))
896 set_latest_backtrack(reader);
901 * Process a write ModelAction
902 * @param curr The ModelAction to process
903 * @return True if the mo_graph was updated or promises were resolved
905 bool ModelChecker::process_write(ModelAction *curr)
// Update the modification order with this write and try to resolve any
// promises (reads waiting on a future value) it can satisfy.
907 bool updated_mod_order = w_modification_order(curr);
908 bool updated_promises = resolve_promises(curr);
// Deferred future values are only flushed once no promises remain, so they
// are checked against a maximally-constrained mo_graph.
910 if (promises->size() == 0) {
911 for (unsigned int i = 0; i < futurevalues->size(); i++) {
912 struct PendingFutureValue pfv = (*futurevalues)[i];
913 add_future_value(pfv.writer, pfv.act);
915 futurevalues->clear();
918 mo_graph->commitChanges();
919 mo_check_promises(curr, false);
// Writes have no meaningful return value.
921 get_thread(curr)->set_return_value(VALUE_NONE);
922 return updated_mod_order || updated_promises;
926 * Process a fence ModelAction
927 * @param curr The ModelAction to process
928 * @return True if synchronization was updated
930 bool ModelChecker::process_fence(ModelAction *curr)
933 * fence-relaxed: no-op
934 * fence-release: only log the occurrence (not in this function), for
935 * use in later synchronization
936 * fence-acquire (this function): search for hypothetical release
939 bool updated = false;
940 if (curr->is_acquire()) {
941 action_list_t *list = action_trace;
942 action_list_t::reverse_iterator rit;
943 /* Find X : is_read(X) && X --sb-> curr */
// Walk the trace backwards, restricted to curr's own thread
// (sequenced-before), stopping at thread start or a prior acquire fence.
944 for (rit = list->rbegin(); rit != list->rend(); rit++) {
945 ModelAction *act = *rit;
948 if (act->get_tid() != curr->get_tid())
950 /* Stop at the beginning of the thread */
951 if (act->is_thread_start())
953 /* Stop once we reach a prior fence-acquire */
954 if (act->is_fence() && act->is_acquire())
958 /* read-acquire will find its own release sequences */
959 if (act->is_acquire())
962 /* Establish hypothetical release sequences */
963 rel_heads_list_t release_heads;
964 get_release_seq_heads(curr, act, &release_heads);
// Synchronize the fence with each release head; a failure is an
// incorrectly-ordered synchronization.
965 for (unsigned int i = 0; i < release_heads.size(); i++)
966 if (!curr->synchronize_with(release_heads[i]))
967 set_bad_synchronization();
968 if (release_heads.size() != 0)
976 * @brief Process the current action for thread-related activity
978 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
979 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
980 * synchronization, etc. This function is a no-op for non-THREAD actions
981 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
983 * @param curr The current action
984 * @return True if synchronization was updated or a thread completed
986 bool ModelChecker::process_thread_action(ModelAction *curr)
988 bool updated = false;
990 switch (curr->get_type()) {
991 case THREAD_CREATE: {
// Materialize the new Thread from the (thrd_t, func, arg) packed into
// the action's location and value.
992 thrd_t *thrd = (thrd_t *)curr->get_location();
993 struct thread_params *params = (struct thread_params *)curr->get_value();
994 Thread *th = new Thread(thrd, params->func, params->arg);
996 th->set_creation(curr);
997 /* Promises can be satisfied by children */
998 for (unsigned int i = 0; i < promises->size(); i++) {
999 Promise *promise = (*promises)[i];
1000 if (promise->thread_is_available(curr->get_tid()))
1001 promise->add_thread(th->get_id());
// THREAD_JOIN (elided label): synchronize with the joined thread's last
// action so its effects happen-before the join.
1006 Thread *blocking = curr->get_thread_operand();
1007 ModelAction *act = get_last_action(blocking->get_id());
1008 curr->synchronize_with(act);
1009 updated = true; /* trigger rel-seq checks */
1012 case THREAD_FINISH: {
1013 Thread *th = get_thread(curr);
// Wake everyone blocked in a join on this thread.
1014 while (!th->wait_list_empty()) {
1015 ModelAction *act = th->pop_wait_list();
1016 scheduler->wake(get_thread(act));
1019 /* Completed thread can't satisfy promises */
1020 for (unsigned int i = 0; i < promises->size(); i++) {
1021 Promise *promise = (*promises)[i];
1022 if (promise->thread_is_available(th->get_id()))
1023 if (promise->eliminate_thread(th->get_id()))
// Removing the last candidate thread kills the promise.
1024 priv->failed_promise = true;
1026 updated = true; /* trigger rel-seq checks */
1029 case THREAD_START: {
1030 check_promises(curr->get_tid(), NULL, curr->get_cv());
1041 * @brief Process the current action for release sequence fixup activity
1043 * Performs model-checker release sequence fixups for the current action,
1044 * forcing a single pending release sequence to break (with a given, potential
1045 * "loose" write) or to complete (i.e., synchronize). If a pending release
1046 * sequence forms a complete release sequence, then we must perform the fixup
1047 * synchronization, mo_graph additions, etc.
1049 * @param curr The current action; must be a release sequence fixup action
1050 * @param work_queue The work queue to which to add work items as they are
1053 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The node's chosen "break write" decides the fixup: NULL => force the
// pending release sequence to complete; non-NULL => break it with that write.
1055 const ModelAction *write = curr->get_node()->get_relseq_break();
1056 struct release_seq *sequence = pending_rel_seqs->back();
1057 pending_rel_seqs->pop_back();
1059 ModelAction *acquire = sequence->acquire;
1060 const ModelAction *rf = sequence->rf;
1061 const ModelAction *release = sequence->release;
1065 ASSERT(release->same_thread(rf));
1067 if (write == NULL) {
1069 * @todo Forcing a synchronization requires that we set
1070 * modification order constraints. For instance, we can't allow
1071 * a fixup sequence in which two separate read-acquire
1072 * operations read from the same sequence, where the first one
1073 * synchronizes and the other doesn't. Essentially, we can't
1074 * allow any writes to insert themselves between 'release' and
1078 /* Must synchronize */
1079 if (!acquire->synchronize_with(release)) {
1080 set_bad_synchronization();
1083 /* Re-check all pending release sequences */
1084 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1085 /* Re-check act for mo_graph edges */
1086 work_queue->push_back(MOEdgeWorkEntry(acquire));
1088 /* propagate synchronization to later actions */
// Scan the trace backwards down to the acquire; anything the acquire
// happens-before must also pick up the new synchronization.
1089 action_list_t::reverse_iterator rit = action_trace->rbegin();
1090 for (; (*rit) != acquire; rit++) {
1091 ModelAction *propagate = *rit;
1092 if (acquire->happens_before(propagate)) {
1093 propagate->synchronize_with(acquire);
1094 /* Re-check 'propagate' for mo_graph edges */
1095 work_queue->push_back(MOEdgeWorkEntry(propagate));
1099 /* Break release sequence with new edges:
1100 * release --mo--> write --mo--> rf */
1101 mo_graph->addEdge(release, write);
1102 mo_graph->addEdge(write, rf);
1105 /* See if we have realized a data race */
1105 /* See if we have realized a data race */
1110 * Initialize the current action by performing one or more of the following
1111 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1112 * in the NodeStack, manipulating backtracking sets, allocating and
1113 * initializing clock vectors, and computing the promises to fulfill.
1115 * @param curr The current action, as passed from the user context; may be
1116 * freed/invalidated after the execution of this function, with a different
1117 * action "returned" its place (pass-by-reference)
1118 * @return True if curr is a newly-explored action; false otherwise
1120 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1122 ModelAction *newcurr;
// RMWC/RMW actions are the second half of an RMW: merge with the earlier
// RMWR instead of exploring a new node.
1124 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1125 newcurr = process_rmw(*curr);
1128 if (newcurr->is_rmw())
1129 compute_promises(newcurr);
// Otherwise, stamp the action and step the NodeStack, which either returns
// a previously-explored action (replay) or records this one as new.
1135 (*curr)->set_seq_number(get_next_seq_num());
1137 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1139 /* First restore type and order in case of RMW operation */
1140 if ((*curr)->is_rmwr())
1141 newcurr->copy_typeandorder(*curr);
1143 ASSERT((*curr)->get_location() == newcurr->get_location());
1144 newcurr->copy_from_new(*curr);
1146 /* Discard duplicate ModelAction; use action from NodeStack */
1149 /* Always compute new clock vector */
1150 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1153 return false; /* Action was explored previously */
1157 /* Always compute new clock vector */
1158 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1160 /* Assign most recent release fence */
1161 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1164 * Perform one-time actions when pushing new ModelAction onto
1167 if (newcurr->is_write())
1168 compute_promises(newcurr);
1169 else if (newcurr->is_relseq_fixup())
1170 compute_relseq_breakwrites(newcurr);
// wait: 2 misc choices (block vs. spurious wakeup); notify_one: one misc
// choice per current waiter (which thread to wake).
1171 else if (newcurr->is_wait())
1172 newcurr->get_node()->set_misc_max(2);
1173 else if (newcurr->is_notify_one()) {
1174 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1176 return true; /* This was a new ModelAction */
1181 * @brief Establish reads-from relation between two actions
1183 * Perform basic operations involved with establishing a concrete rf relation,
1184 * including setting the ModelAction data and checking for release sequences.
1186 * @param act The action that is reading (must be a read)
1187 * @param rf The action from which we are reading (must be a write)
1189 * @return True if this read established synchronization
1191 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
/* Record the rf relation unconditionally */
1193 act->set_read_from(rf);
/* Acquire reads additionally synchronize with the head(s) of the
 * release sequence(s) containing rf */
1194 if (rf != NULL && act->is_acquire()) {
1195 rel_heads_list_t release_heads;
1196 get_release_seq_heads(act, act, &release_heads);
1197 int num_heads = release_heads.size();
1198 for (unsigned int i = 0; i < release_heads.size(); i++)
/* synchronize_with() can fail if the ordering is inconsistent;
 * flag the execution rather than aborting */
1199 if (!act->synchronize_with(release_heads[i])) {
1200 set_bad_synchronization();
/* True iff at least one release head synchronized with act */
1203 return num_heads > 0;
1209 * @brief Check whether a model action is enabled.
1211 * Checks whether a lock or join operation would be successful (i.e., is the
1212 * lock already locked, or is the joined thread already complete). If not, put
1213 * the action in a waiter list.
1215 * @param curr is the ModelAction to check whether it is enabled.
1216 * @return a bool that indicates whether the action is enabled.
/* NOTE(review): listing elided — the 'return' statements for the blocked
 * and enabled cases are not visible in this chunk. */
1218 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1219 if (curr->is_lock()) {
/* The lock action's location is the mutex object itself */
1220 std::mutex *lock = (std::mutex *)curr->get_location();
1221 struct std::mutex_state *state = lock->get_state();
1222 if (state->islocked) {
1223 //Stick the action in the appropriate waiting queue
1224 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1227 } else if (curr->get_type() == THREAD_JOIN) {
/* The join action's location is the Thread being joined */
1228 Thread *blocking = (Thread *)curr->get_location();
1229 if (!blocking->is_complete()) {
/* Defer this join until 'blocking' completes */
1230 blocking->push_wait_list(curr);
1239 * This is the heart of the model checker routine. It performs model-checking
1240 * actions corresponding to a given "current action." Among other processes, it
1241 * calculates reads-from relationships, updates synchronization clock vectors,
1242 * forms a memory_order constraints graph, and handles replay/backtrack
1243 * execution when running permutations of previously-observed executions.
1245 * @param curr The current action to process
1246 * @return The ModelAction that is actually executed; may be different than
1247 * curr; may be NULL, if the current action is not enabled to run
/* Main per-action model-checking driver: initializes curr, adds it to
 * bookkeeping lists, then drains a work queue of follow-up tasks
 * (processing the action, resolving release sequences, recomputing
 * mo_graph edges) until quiescent or an assertion fires.
 * NOTE(review): elided listing — the flow after sleeping a blocked
 * thread, and several case/brace boundaries, are not visible here. */
1249 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
/* An RMW/RMWC is the second half of an already-counted RMWR */
1252 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1254 if (!check_action_enabled(curr)) {
1255 /* Make the execution look like we chose to run this action
1256 * much later, when a lock/join can succeed */
1257 get_thread(curr)->set_pending(curr);
1258 scheduler->sleep(get_thread(curr));
/* curr may be replaced by an equivalent, previously-explored action */
1262 bool newly_explored = initialize_curr_action(&curr);
1268 wake_up_sleeping_actions(curr);
1270 /* Add the action to lists before any other model-checking tasks */
1271 if (!second_part_of_rmw)
1272 add_action_to_lists(curr);
1274 /* Build may_read_from set for newly-created actions */
1275 if (newly_explored && curr->is_read())
1276 build_reads_from_past(curr);
1278 /* Initialize work_queue with the "current action" work */
1279 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1280 while (!work_queue.empty() && !has_asserted()) {
1281 WorkQueueEntry work = work_queue.front();
1282 work_queue.pop_front();
1284 switch (work.type) {
1285 case WORK_CHECK_CURR_ACTION: {
1286 ModelAction *act = work.action;
1287 bool update = false; /* update this location's release seq's */
1288 bool update_all = false; /* update all release seq's */
/* Each process_* returns whether it changed state that requires
 * re-checking release sequences (elided: the flag assignments) */
1290 if (process_thread_action(curr))
1293 if (act->is_read() && process_read(act, second_part_of_rmw))
1296 if (act->is_write() && process_write(act))
1299 if (act->is_fence() && process_fence(act))
1302 if (act->is_mutex_op() && process_mutex(act))
1305 if (act->is_relseq_fixup())
1306 process_relseq_fixup(curr, &work_queue);
/* NULL location => re-check release sequences everywhere */
1309 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1311 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1314 case WORK_CHECK_RELEASE_SEQ:
1315 resolve_release_sequences(work.location, &work_queue);
1317 case WORK_CHECK_MO_EDGES: {
1318 /** @todo Complete verification of work_queue */
1319 ModelAction *act = work.action;
1320 bool updated = false;
1322 if (act->is_read()) {
1323 const ModelAction *rf = act->get_reads_from();
1324 const Promise *promise = act->get_reads_from_promise();
/* Recompute read-side mo edges against either a concrete rf
 * or an unresolved Promise */
1326 if (r_modification_order(act, rf))
1328 } else if (promise) {
1329 if (r_modification_order(act, promise))
1333 if (act->is_write()) {
1334 if (w_modification_order(act))
1337 mo_graph->commitChanges();
/* New mo edges may complete a pending release sequence */
1340 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* Record backtracking points discovered while processing curr */
1349 check_curr_backtracking(curr);
1350 set_backtracking(curr);
/* Marks curr as the latest backtracking point if its Node (or its
 * parent's) still has any unexplored alternative: backtrack threads,
 * misc choices, reads-from candidates, future values, promises, or
 * release-sequence breaks. */
1354 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1356 Node *currnode = curr->get_node();
1357 Node *parnode = currnode->get_parent();
1359 if ((parnode && !parnode->backtrack_empty()) ||
1360 !currnode->misc_empty() ||
1361 !currnode->read_from_empty() ||
1362 !currnode->future_value_empty() ||
1363 !currnode->promise_empty() ||
1364 !currnode->relseq_break_empty()) {
1365 set_latest_backtrack(curr);
/* Returns whether any outstanding Promise has an expiration earlier than
 * the current sequence-number count, i.e. it can no longer be fulfilled.
 * NOTE(review): the 'return true' / 'return false' lines are elided from
 * this listing. */
1369 bool ModelChecker::promises_expired() const
1371 for (unsigned int i = 0; i < promises->size(); i++) {
1372 Promise *promise = (*promises)[i];
1373 if (promise->get_expiration() < priv->used_sequence_numbers)
1380 * This is the strongest feasibility check available.
1381 * @return whether the current trace (partial or complete) must be a prefix of
1384 bool ModelChecker::isfeasibleprefix() const
/* Feasible prefix = no unresolved (pending) release sequences AND
 * feasible by every check other than release sequences */
1386 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1390 * Print diagnostic information about an infeasible execution
1391 * @param prefix A string to prefix the output with; if NULL, then a default
1392 * message prefix will be provided
/* Prints a one-line summary of every reason the current execution is
 * infeasible (mo cycle, failed/expired/unresolved promises, etc.).
 * NOTE(review): the declarations of 'buf' and 'ptr' are elided from this
 * listing; 'ptr' accumulates into 'buf' via sprintf. */
1394 void ModelChecker::print_infeasibility(const char *prefix) const
1398 if (mo_graph->checkForCycles())
1399 ptr += sprintf(ptr, "[mo cycle]");
1400 if (priv->failed_promise)
1401 ptr += sprintf(ptr, "[failed promise]");
1402 if (priv->too_many_reads)
1403 ptr += sprintf(ptr, "[too many reads]");
1404 if (priv->no_valid_reads)
1405 ptr += sprintf(ptr, "[no valid reads-from]");
1406 if (priv->bad_synchronization)
1407 ptr += sprintf(ptr, "[bad sw ordering]");
1408 if (promises_expired())
1409 ptr += sprintf(ptr, "[promise expired]");
1410 if (promises->size() != 0)
1411 ptr += sprintf(ptr, "[unresolved promise]");
1413 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1417 * Returns whether the current completed trace is feasible, except for pending
1418 * release sequences.
1420 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
/* Feasible (no infeasibility flags) and all promises resolved;
 * deliberately ignores pending release sequences */
1422 return !is_infeasible() && promises->size() == 0;
1426 * Check if the current partial trace is infeasible. Does not check any
1427 * end-of-execution flags, which might rule out the execution. Thus, this is
1428 * useful only for ruling an execution as infeasible.
1429 * @return whether the current partial trace is infeasible.
/* Disjunction of all partial-trace infeasibility conditions; the final
 * operand (promises_expired(), per print_infeasibility) is elided from
 * this listing. */
1431 bool ModelChecker::is_infeasible() const
1433 return mo_graph->checkForCycles() ||
1434 priv->no_valid_reads ||
1435 priv->failed_promise ||
1436 priv->too_many_reads ||
1437 priv->bad_synchronization ||
1441 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1442 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
/* The first half (the RMWR) of this thread's RMW is its last action */
1443 ModelAction *lastread = get_last_action(act->get_tid());
1444 lastread->process_rmw(act);
1445 if (act->is_rmw()) {
/* A completed RMW is mo-ordered immediately after what it read from,
 * whether that was a concrete write or a Promise */
1446 if (lastread->get_reads_from())
1447 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1449 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1450 mo_graph->commitChanges();
1456 * Checks whether a thread has read from the same write for too many times
1457 * without seeing the effects of a later write.
1460 * 1) there must be a different write that we could read from that would satisfy the modification order,
1461 * 2) we must have read from the same value in excess of maxreads times, and
1462 * 3) that other write must have been in the reads_from set for maxreads times.
1464 * If so, we decide that the execution is no longer feasible.
/* Implements the 'maxreads' liveness heuristic: if this thread has read
 * from the same write 'rf' at least params.maxreads times in a row while
 * a different feasible write was available to every one of those reads,
 * declare the execution infeasible (too_many_reads).
 * NOTE(review): elided listing — several early 'return'/'continue'/'break'
 * lines and the declaration of 'count' are not visible. */
1466 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1468 if (params.maxreads != 0) {
/* Nothing to vary if curr has at most one reads-from candidate */
1469 if (curr->get_node()->get_read_from_size() <= 1)
1471 //Must make sure that execution is currently feasible... We could
1472 //accidentally clear by rolling back
1473 if (is_infeasible())
1475 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1476 int tid = id_to_int(curr->get_tid());
1479 if ((int)thrd_lists->size() <= tid)
1481 action_list_t *list = &(*thrd_lists)[tid];
1483 action_list_t::reverse_iterator rit = list->rbegin();
1484 /* Skip past curr */
1485 for (; (*rit) != curr; rit++)
1487 /* go past curr now */
/* Save position so the second pass can re-walk the same window */
1490 action_list_t::reverse_iterator ritcopy = rit;
1491 //See if we have enough reads from the same value
1493 for (; count < params.maxreads; rit++, count++) {
1494 if (rit == list->rend())
1496 ModelAction *act = *rit;
/* Any intervening non-read, different-rf, or single-candidate read
 * breaks the streak */
1497 if (!act->is_read())
1500 if (act->get_reads_from() != rf)
1502 if (act->get_node()->get_read_from_size() <= 1)
/* Try each alternative write curr could have read from */
1505 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1507 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1509 /* Need a different write */
1513 /* Test to see whether this is a feasible write to read from */
1514 /** NOTE: all members of read-from set should be
1515 * feasible, so we no longer check it here **/
1519 bool feasiblewrite = true;
1520 //now we need to see if this write works for everyone
1522 for (int loop = count; loop > 0; loop--, rit++) {
1523 ModelAction *act = *rit;
1524 bool foundvalue = false;
1525 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1526 if (act->get_node()->get_read_from_at(j) == write) {
1532 feasiblewrite = false;
1536 if (feasiblewrite) {
/* Every recent read could have taken 'write' instead: rule the
 * execution out */
1537 priv->too_many_reads = true;
1545 * Updates the mo_graph with the constraints imposed from the current
1548 * Basic idea is the following: Go through each other thread and find
1549 * the last action that happened before our read. Two cases:
1551 * (1) The action is a write => that write must either occur before
1552 * the write we read from or be the write we read from.
1554 * (2) The action is a read => the write that that action read from
1555 * must occur before the write we read from or be the same write.
1557 * @param curr The current action. Must be a read.
1558 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1559 * @return True if modification order edges were added; false otherwise
/* Adds mo-graph edges implied by 'curr' (a read) reading from 'rf'
 * (a concrete write or a Promise): per-thread prior writes/reads that
 * happen-before curr must be mo-ordered before rf, plus the C++11
 * SC-fence rules (29.3 stmts 4-6). Returns whether any edge was added.
 * NOTE(review): elided listing — declarations of 'i'/'added', loop
 * 'break's after the one-per-thread edges, and the final return are not
 * visible here. */
1561 template <typename rf_type>
1562 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1564 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1567 ASSERT(curr->is_read());
1569 /* Last SC fence in the current thread */
1570 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1572 /* Iterate over all threads */
1573 for (i = 0; i < thrd_lists->size(); i++) {
1574 /* Last SC fence in thread i */
1575 ModelAction *last_sc_fence_thread_local = NULL;
1576 if (int_to_id((int)i) != curr->get_tid())
1577 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1579 /* Last SC fence in thread i, before last SC fence in current thread */
1580 ModelAction *last_sc_fence_thread_before = NULL;
1581 if (last_sc_fence_local)
1582 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1584 /* Iterate over actions in thread, starting from most recent */
1585 action_list_t *list = &(*thrd_lists)[i];
1586 action_list_t::reverse_iterator rit;
1587 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1588 ModelAction *act = *rit;
/* SC-fence-induced ordering: writes separated from curr by the
 * appropriate SC fences are mo-before rf */
1590 if (act->is_write() && !act->equals(rf) && act != curr) {
1591 /* C++, Section 29.3 statement 5 */
1592 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1593 *act < *last_sc_fence_thread_local) {
1594 added = mo_graph->addEdge(act, rf) || added;
1597 /* C++, Section 29.3 statement 4 */
1598 else if (act->is_seqcst() && last_sc_fence_local &&
1599 *act < *last_sc_fence_local) {
1600 added = mo_graph->addEdge(act, rf) || added;
1603 /* C++, Section 29.3 statement 6 */
1604 else if (last_sc_fence_thread_before &&
1605 *act < *last_sc_fence_thread_before) {
1606 added = mo_graph->addEdge(act, rf) || added;
1612 * Include at most one act per-thread that "happens
1613 * before" curr. Don't consider reflexively.
1615 if (act->happens_before(curr) && act != curr) {
1616 if (act->is_write()) {
1617 if (!act->equals(rf)) {
1618 added = mo_graph->addEdge(act, rf) || added;
/* act is a read: the write IT read from must be mo-before rf */
1621 const ModelAction *prevreadfrom = act->get_reads_from();
1622 //if the previous read is unresolved, keep going...
1623 if (prevreadfrom == NULL)
1626 if (!prevreadfrom->equals(rf)) {
1627 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1636 * All compatible, thread-exclusive promises must be ordered after any
1637 * concrete loads from the same thread
1639 for (unsigned int i = 0; i < promises->size(); i++)
1640 if ((*promises)[i]->is_compatible_exclusive(curr))
1641 added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1647 * Updates the mo_graph with the constraints imposed from the current write.
1649 * Basic idea is the following: Go through each other thread and find
1650 * the latest action that happened before our write. Two cases:
1652 * (1) The action is a write => that write must occur before
1655 * (2) The action is a read => the write that that action read from
1656 * must occur before the current write.
1658 * This method also handles two other issues:
1660 * (I) Sequential Consistency: Making sure that if the current write is
1661 * seq_cst, that it occurs after the previous seq_cst write.
1663 * (II) Sending the write back to non-synchronizing reads.
1665 * @param curr The current action. Must be a write.
1666 * @return True if modification order edges were added; false otherwise
/* Adds mo-graph edges implied by 'curr' (a write): orders it after the
 * last SC write (if curr is SC), after per-thread happens-before
 * predecessors (and the writes those predecessors read from), applies
 * the SC-fence rule (29.3 stmt 7), and collects non-synchronizing reads
 * in other threads as potential future-value targets. Returns whether
 * any edge was added.
 * NOTE(review): elided listing — declarations of 'i'/'added', several
 * 'break'/'continue' lines, and the final return are not visible. */
1668 bool ModelChecker::w_modification_order(ModelAction *curr)
1670 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1673 ASSERT(curr->is_write());
1675 if (curr->is_seqcst()) {
1676 /* We have to at least see the last sequentially consistent write,
1677 so we are initialized. */
1678 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1679 if (last_seq_cst != NULL) {
1680 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1684 /* Last SC fence in the current thread */
1685 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1687 /* Iterate over all threads */
1688 for (i = 0; i < thrd_lists->size(); i++) {
1689 /* Last SC fence in thread i, before last SC fence in current thread */
1690 ModelAction *last_sc_fence_thread_before = NULL;
1691 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1692 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1694 /* Iterate over actions in thread, starting from most recent */
1695 action_list_t *list = &(*thrd_lists)[i];
1696 action_list_t::reverse_iterator rit;
1697 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1698 ModelAction *act = *rit;
1701 * 1) If RMW and it actually read from something, then we
1702 * already have all relevant edges, so just skip to next
1705 * 2) If RMW and it didn't read from anything, we should take
1706 * whatever edge we can get to speed up convergence.
1708 * 3) If normal write, we need to look at earlier actions, so
1709 * continue processing list.
1711 if (curr->is_rmw()) {
1712 if (curr->get_reads_from() != NULL)
1720 /* C++, Section 29.3 statement 7 */
1721 if (last_sc_fence_thread_before && act->is_write() &&
1722 *act < *last_sc_fence_thread_before) {
1723 added = mo_graph->addEdge(act, curr) || added;
1728 * Include at most one act per-thread that "happens
1731 if (act->happens_before(curr)) {
1733 * Note: if act is RMW, just add edge:
1735 * The following edge should be handled elsewhere:
1736 * readfrom(act) --mo--> act
1738 if (act->is_write())
1739 added = mo_graph->addEdge(act, curr) || added;
1740 else if (act->is_read()) {
1741 //if previous read accessed a null, just keep going
1742 if (act->get_reads_from() == NULL)
1744 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1747 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1748 !act->same_thread(curr)) {
1749 /* We have an action that:
1750 (1) did not happen before us
1751 (2) is a read and we are a write
1752 (3) cannot synchronize with us
1753 (4) is in a different thread
1755 that read could potentially read from our write. Note that
1756 these checks are overly conservative at this point, we'll
1757 do more checks before actually removing the
/* Record the (write, read) pair for later future-value resolution,
 * subject to the thin-air constraint */
1761 if (thin_air_constraint_may_allow(curr, act)) {
1762 if (!is_infeasible())
1763 futurevalues->push_back(PendingFutureValue(curr, act));
1764 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1765 add_future_value(curr, act);
1772 * All compatible, thread-exclusive promises must be ordered after any
1773 * concrete stores to the same thread, or else they can be merged with
1776 for (unsigned int i = 0; i < promises->size(); i++)
1777 if ((*promises)[i]->is_compatible_exclusive(curr))
1778 added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1783 /** Arbitrary reads from the future are not allowed. Section 29.3
1784 * part 9 places some constraints. This method checks one result of that
1785 * constraint. Others require compiler support. */
/* Thin-air check for RMW chains: if both actions are RMWs and 'reader'
 * lies on 'writer's own reads-from chain (same thread, happens-before),
 * the read would depend on its own result and must be disallowed.
 * NOTE(review): elided listing — the 'return true'/'return false' lines
 * are not visible here. */
1786 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
/* Constraint only applies when both ends are RMWs */
1788 if (!writer->is_rmw())
1791 if (!reader->is_rmw())
/* Walk writer's reads-from chain backwards looking for reader */
1794 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1795 if (search == reader)
1797 if (search->get_tid() == reader->get_tid() &&
1798 search->happens_before(reader))
1806 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1807 * some constraints. This method checks one of the following constraints (others
1808 * require compiler support):
1810 * If X --hb-> Y --mo-> Z, then X should not read from Z.
/* Checks the constraint "if X --hb-> Y --mo-> Z, then X should not read
 * from Z": for each thread, find the earliest write mo-visible after
 * 'reader' (directly, or via a later read's rf) and reject if it
 * mo-precedes 'writer'.
 * NOTE(review): elided listing — declaration of 'i', a loop 'break', and
 * the return statements are not visible here. */
1812 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1814 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1816 /* Iterate over all threads */
1817 for (i = 0; i < thrd_lists->size(); i++) {
1818 const ModelAction *write_after_read = NULL;
1820 /* Iterate over actions in thread, starting from most recent */
1821 action_list_t *list = &(*thrd_lists)[i];
1822 action_list_t::reverse_iterator rit;
1823 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1824 ModelAction *act = *rit;
1826 /* Don't disallow due to act == reader */
1827 if (!reader->happens_before(act) || reader == act)
1829 else if (act->is_write())
1830 write_after_read = act;
1831 else if (act->is_read() && act->get_reads_from() != NULL)
1832 write_after_read = act->get_reads_from();
/* writer mo-after a write that reader happens-before => disallowed */
1835 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1842 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1843 * The ModelAction under consideration is expected to be taking part in
1844 * release/acquire synchronization as an object of the "reads from" relation.
1845 * Note that this can only provide release sequence support for RMW chains
1846 * which do not read from the future, as those actions cannot be traced until
1847 * their "promise" is fulfilled. Similarly, we may not even establish the
1848 * presence of a release sequence with certainty, as some modification order
1849 * constraints may be decided further in the future. Thus, this function
1850 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1851 * and a boolean representing certainty.
1853 * @param rf The action that might be part of a release sequence. Must be a
1855 * @param release_heads A pass-by-reference style return parameter. After
1856 * execution of this function, release_heads will contain the heads of all the
1857 * relevant release sequences, if any exists with certainty
1858 * @param pending A pass-by-reference style return parameter which is only used
1859 * when returning false (i.e., uncertain). Returns most information regarding
1860 * an uncertain release sequence, including any write operations that might
1861 * break the sequence.
1862 * @return true, if the ModelChecker is certain that release_heads is complete;
/* Core release-sequence computation (see the doc comment above): walks
 * rf's RMW chain back to a release (or a same-thread fence-release),
 * then checks every other thread's writes against the mo graph to decide
 * whether the sequence from the release head to rf is provably
 * contiguous. Populates 'release_heads' with certain heads and 'pending'
 * with the data needed to re-check later; returns certainty.
 * NOTE(review): elided listing — the RMW-chain loop header, several
 * 'break'/'continue'/'return' lines, and brace boundaries are not
 * visible here. */
1865 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1866 rel_heads_list_t *release_heads,
1867 struct release_seq *pending) const
1869 /* Only check for release sequences if there are no cycles */
1870 if (mo_graph->checkForCycles())
1874 ASSERT(rf->is_write());
/* Walk back through the RMW chain, collecting heads along the way */
1876 if (rf->is_release())
1877 release_heads->push_back(rf);
1878 else if (rf->get_last_fence_release())
1879 release_heads->push_back(rf->get_last_fence_release());
1881 break; /* End of RMW chain */
1883 /** @todo Need to be smarter here... In the linux lock
1884 * example, this will run to the beginning of the program for
1886 /** @todo The way to be smarter here is to keep going until 1
1887 * thread has a release preceded by an acquire and you've seen
1890 /* acq_rel RMW is a sufficient stopping condition */
1891 if (rf->is_acquire() && rf->is_release())
1892 return true; /* complete */
1894 rf = rf->get_reads_from();
1897 /* read from future: need to settle this later */
1899 return false; /* incomplete */
1902 if (rf->is_release())
1903 return true; /* complete */
1905 /* else relaxed write
1906 * - check for fence-release in the same thread (29.8, stmt. 3)
1907 * - check modification order for contiguous subsequence
1908 * -> rf must be same thread as release */
1910 const ModelAction *fence_release = rf->get_last_fence_release();
1911 /* Synchronize with a fence-release unconditionally; we don't need to
1912 * find any more "contiguous subsequence..." for it */
1914 release_heads->push_back(fence_release);
1916 int tid = id_to_int(rf->get_tid());
1917 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1918 action_list_t *list = &(*thrd_lists)[tid];
1919 action_list_t::const_reverse_iterator rit;
1921 /* Find rf in the thread list */
1922 rit = std::find(list->rbegin(), list->rend(), rf);
1923 ASSERT(rit != list->rend());
1925 /* Find the last {write,fence}-release */
1926 for (; rit != list->rend(); rit++) {
/* Stop searching once we pass the fence-release; it dominates */
1927 if (fence_release && *(*rit) < *fence_release)
1929 if ((*rit)->is_release())
1932 if (rit == list->rend()) {
1933 /* No write-release in this thread */
1934 return true; /* complete */
1935 } else if (fence_release && *(*rit) < *fence_release) {
1936 /* The fence-release is more recent (and so, "stronger") than
1937 * the most recent write-release */
1938 return true; /* complete */
1939 } /* else, need to establish contiguous release sequence */
1940 ModelAction *release = *rit;
1942 ASSERT(rf->same_thread(release));
1944 pending->writes.clear();
/* For every other thread, decide whether its writes can still break the
 * contiguity between 'release' and 'rf' */
1946 bool certain = true;
1947 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1948 if (id_to_int(rf->get_tid()) == (int)i)
1950 list = &(*thrd_lists)[i];
1952 /* Can we ensure no future writes from this thread may break
1953 * the release seq? */
1954 bool future_ordered = false;
1956 ModelAction *last = get_last_action(int_to_id(i));
1957 Thread *th = get_thread(int_to_id(i));
1958 if ((last && rf->happens_before(last)) ||
1961 future_ordered = true;
1963 ASSERT(!th->is_model_thread() || future_ordered);
1965 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1966 const ModelAction *act = *rit;
1967 /* Reach synchronization -> this thread is complete */
1968 if (act->happens_before(release))
1970 if (rf->happens_before(act)) {
1971 future_ordered = true;
1975 /* Only non-RMW writes can break release sequences */
1976 if (!act->is_write() || act->is_rmw())
1979 /* Check modification order */
1980 if (mo_graph->checkReachable(rf, act)) {
1981 /* rf --mo--> act */
1982 future_ordered = true;
1985 if (mo_graph->checkReachable(act, release))
1986 /* act --mo--> release */
1988 if (mo_graph->checkReachable(release, act) &&
1989 mo_graph->checkReachable(act, rf)) {
1990 /* release --mo-> act --mo--> rf */
1991 return true; /* complete */
1993 /* act may break release sequence */
1994 pending->writes.push_back(act);
1997 if (!future_ordered)
1998 certain = false; /* This thread is uncertain */
2002 release_heads->push_back(release);
2003 pending->writes.clear();
2005 pending->release = release;
2012 * An interface for getting the release sequence head(s) with which a
2013 * given ModelAction must synchronize. This function only returns a non-empty
2014 * result when it can locate a release sequence head with certainty. Otherwise,
2015 * it may mark the internal state of the ModelChecker so that it will handle
2016 * the release sequence at a later time, causing @a acquire to update its
2017 * synchronization at some later point in execution.
2019 * @param acquire The 'acquire' action that may synchronize with a release
2021 * @param read The read action that may read from a release sequence; this may
2022 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2023 * when 'acquire' is a fence-acquire)
2024 * @param release_heads A pass-by-reference return parameter. Will be filled
2025 * with the head(s) of the release sequence(s), if they exists with certainty.
2026 * @see ModelChecker::release_seq_heads
2028 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2029 ModelAction *read, rel_heads_list_t *release_heads)
2031 const ModelAction *rf = read->get_reads_from();
/* Allocate the pending record up-front; it is only kept if the result
 * is uncertain (snapshot allocator so it rolls back with the state) */
2032 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2033 sequence->acquire = acquire;
2034 sequence->read = read;
2036 if (!release_seq_heads(rf, release_heads, sequence)) {
2037 /* add act to 'lazy checking' list */
2038 pending_rel_seqs->push_back(sequence);
/* Certain result: the pending record is not needed */
2040 snapshot_free(sequence);
2045 * Attempt to resolve all stashed operations that might synchronize with a
2046 * release sequence for a given location. This implements the "lazy" portion of
2047 * determining whether or not a release sequence was contiguous, since not all
2048 * modification order information is present at the time an action occurs.
2050 * @param location The location/object that should be checked for release
2051 * sequence resolutions. A NULL value means to check all locations.
2052 * @param work_queue The work queue to which to add work items as they are
2054 * @return True if any updates occurred (new synchronization, new mo_graph
/* Lazily re-checks every pending release sequence (optionally filtered
 * by location): completed sequences synchronize their acquire with each
 * head, propagate that synchronization forward through the trace, and
 * are removed from the pending list. Returns whether anything updated.
 * NOTE(review): elided listing — the declaration of 'complete', the
 * 'updated' assignments, the iterator advance on the filtered/incomplete
 * paths, and the final return are not visible here. */
2057 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2059 bool updated = false;
2060 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2061 while (it != pending_rel_seqs->end()) {
2062 struct release_seq *pending = *it;
2063 ModelAction *acquire = pending->acquire;
2064 const ModelAction *read = pending->read;
2066 /* Only resolve sequences on the given location, if provided */
2067 if (location && read->get_location() != location) {
2072 const ModelAction *rf = read->get_reads_from();
2073 rel_heads_list_t release_heads;
2075 complete = release_seq_heads(rf, &release_heads, pending);
2076 for (unsigned int i = 0; i < release_heads.size(); i++) {
/* Avoid duplicate synchronization; flag inconsistent ordering */
2077 if (!acquire->has_synchronized_with(release_heads[i])) {
2078 if (acquire->synchronize_with(release_heads[i]))
2081 set_bad_synchronization();
2086 /* Re-check all pending release sequences */
2087 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2088 /* Re-check read-acquire for mo_graph edges */
2089 if (acquire->is_read())
2090 work_queue->push_back(MOEdgeWorkEntry(acquire));
2092 /* propagate synchronization to later actions */
2093 action_list_t::reverse_iterator rit = action_trace->rbegin();
2094 for (; (*rit) != acquire; rit++) {
2095 ModelAction *propagate = *rit;
2096 if (acquire->happens_before(propagate)) {
2097 propagate->synchronize_with(acquire);
2098 /* Re-check 'propagate' for mo_graph edges */
2099 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Completed: remove from pending list and free the record */
2104 it = pending_rel_seqs->erase(it);
2105 snapshot_free(pending);
2111 // If we resolved promises or data races, see if we have realized a data race.
2118 * Performs various bookkeeping operations for the current ModelAction. For
2119 * instance, adds action to the per-object, per-thread action vector and to the
2120 * action trace list of all thread actions.
2122 * @param act is the ModelAction to add.
/* Registers 'act' in all bookkeeping structures: the per-location list,
 * the global trace, the per-location/per-thread vectors, and the
 * per-thread last-action / last-fence-release caches. The first atomic
 * action at a location also injects a synthetic 'uninitialized' write
 * before it.
 * NOTE(review): elided listing — the declaration of 'uninit_id' and the
 * 'if (uninit)' guards around the uninit insertions are not visible. */
2124 void ModelChecker::add_action_to_lists(ModelAction *act)
2126 int tid = id_to_int(act->get_tid());
2127 ModelAction *uninit = NULL;
2129 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2130 if (list->empty() && act->is_atomic_var()) {
/* First access to this atomic: model the pre-existing value with a
 * synthetic uninitialized write */
2131 uninit = new_uninitialized_action(act->get_location());
2132 uninit_id = id_to_int(uninit->get_tid());
2133 list->push_back(uninit);
2135 list->push_back(act);
2137 action_trace->push_back(act);
/* The uninit write conceptually precedes the whole trace */
2139 action_trace->push_front(uninit);
2141 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2142 if (tid >= (int)vec->size())
2143 vec->resize(priv->next_thread_id);
2144 (*vec)[tid].push_back(act);
2146 (*vec)[uninit_id].push_front(uninit);
2148 if ((int)thrd_last_action->size() <= tid)
2149 thrd_last_action->resize(get_num_threads());
2150 (*thrd_last_action)[tid] = act;
2152 (*thrd_last_action)[uninit_id] = uninit;
2154 if (act->is_fence() && act->is_release()) {
2155 if ((int)thrd_last_fence_release->size() <= tid)
2156 thrd_last_fence_release->resize(get_num_threads());
2157 (*thrd_last_fence_release)[tid] = act;
2160 if (act->is_wait()) {
/* A wait also acts on its mutex; index it under the mutex location
 * too (the mutex pointer is stored in the action's value) */
2161 void *mutex_loc = (void *) act->get_value();
2162 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2164 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2165 if (tid >= (int)vec->size())
2166 vec->resize(priv->next_thread_id);
2167 (*vec)[tid].push_back(act);
2172 * @brief Get the last action performed by a particular Thread
2173 * @param tid The thread ID of the Thread in question
2174 * @return The last action in the thread
/* NOTE(review): the out-of-range fallback return is elided from this
 * listing. */
2176 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2178 int threadid = id_to_int(tid);
/* Guard: the cache vector may not yet cover this thread id */
2179 if (threadid < (int)thrd_last_action->size())
2180 return (*thrd_last_action)[id_to_int(tid)];
2186 * @brief Get the last fence release performed by a particular Thread
2187 * @param tid The thread ID of the Thread in question
2188 * @return The last fence release in the thread, if one exists; NULL otherwise
/* NOTE(review): the out-of-range fallback return is elided from this
 * listing. */
2190 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2192 int threadid = id_to_int(tid);
/* Guard: the cache vector may not yet cover this thread id */
2193 if (threadid < (int)thrd_last_fence_release->size())
2194 return (*thrd_last_fence_release)[id_to_int(tid)];
2200 * Gets the last memory_order_seq_cst write (in the total global sequence)
2201 * performed on a particular object (i.e., memory location), not including the
2203 * @param curr The current ModelAction; also denotes the object location to
2205 * @return The last seq_cst write
/* NOTE(review): the 'return *rit' / NULL fallback lines are elided from
 * this listing. */
2207 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2209 void *location = curr->get_location();
2210 action_list_t *list = get_safe_ptr_action(obj_map, location);
2211 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2212 action_list_t::reverse_iterator rit;
/* Reverse scan finds the most recent matching write, excluding curr */
2213 for (rit = list->rbegin(); rit != list->rend(); rit++)
2214 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2220 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2221 * performed in a particular thread, prior to a particular fence.
2222 * @param tid The ID of the thread to check
2223 * @param before_fence The fence from which to begin the search; if NULL, then
2224 * search for the most recent fence in the thread.
2225 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2227 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2229 /* All fences should have NULL location */
2230 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2231 action_list_t::reverse_iterator rit = list->rbegin();
// First phase: when a starting fence is given, skip backwards until we
// locate it in the global (NULL-location) fence list...
2234 for (; rit != list->rend(); rit++)
2235 if (*rit == before_fence)
// ...and assert that it was actually found before continuing past it.
2238 ASSERT(*rit == before_fence);
// Second phase: continue backwards from there, looking for the nearest
// earlier seq_cst fence belonging to thread `tid`.
2242 for (; rit != list->rend(); rit++)
2243 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2249 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2250 * location). This function identifies the mutex according to the current
2251 * action, which is presumed to perform on the same mutex.
2252 * @param curr The current ModelAction; also denotes the object location to
2254 * @return The last unlock operation
2256 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2258 void *location = curr->get_location();
2259 action_list_t *list = get_safe_ptr_action(obj_map, location);
2260 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Newest-first scan; a wait() releases the mutex too, so it counts the
// same as an explicit unlock here.
2261 action_list_t::reverse_iterator rit;
2262 for (rit = list->rbegin(); rit != list->rend(); rit++)
2263 if ((*rit)->is_unlock() || (*rit)->is_wait())
// Get the action that "parents" the next action in thread `tid`: the
// thread's own last action, or — if the thread has not acted yet — the
// THREAD_CREATE action that spawned it.
2268 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2270 ModelAction *parent = get_last_action(tid);
// NOTE(review): the NULL check guarding this fallback is elided in this
// excerpt; the creation action is used only when there is no last action.
2272 parent = get_thread(tid)->get_creation();
2277 * Returns the clock vector for a given thread.
2278 * @param tid The thread whose clock vector we want
2279 * @return Desired clock vector
2281 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
// A thread's current clock vector is the one attached to its parent
// action (last action or creation; see get_parent_action()).
2283 return get_parent_action(tid)->get_cv();
2287 * Resolve a set of Promises with a current write. The set is provided in the
2288 * Node corresponding to @a write.
2289 * @param write The ModelAction that is fulfilling Promises
2290 * @return True if promises were resolved; false otherwise
2292 bool ModelChecker::resolve_promises(ModelAction *write)
2294 bool haveResolved = false;
2295 std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2296 promise_list_t mustResolve, resolved;
// Two indices: `i` tracks the Node's promise bitmap position while
// `promise_index` tracks the (shrinking) promises vector — erasing a
// resolved promise advances `i` but not `promise_index`.
2298 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2299 Promise *promise = (*promises)[promise_index];
2300 if (write->get_node()->get_promise(i)) {
2301 ModelAction *read = promise->get_action();
2302 read_from(read, write);
2303 //Make sure the promise's value matches the write's value
2304 ASSERT(promise->is_compatible(write));
// Let the mo_graph report any additional promises that this
// resolution forces us to resolve (collected in mustResolve).
2305 mo_graph->resolvePromise(read, write, &mustResolve);
2307 resolved.push_back(promise);
2308 promises->erase(promises->begin() + promise_index);
2309 actions_to_check.push_back(read);
2311 haveResolved = true;
// Any promise the mo_graph required us to resolve but which we did not
// actually resolve above marks the execution as a failed promise.
2316 for (unsigned int i = 0; i < mustResolve.size(); i++) {
2317 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2319 priv->failed_promise = true;
2321 for (unsigned int i = 0; i < resolved.size(); i++)
2323 //Check whether reading these writes has made threads unable to
2326 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2327 ModelAction *read = actions_to_check[i];
// Re-check remaining promises now that `read` has a reads-from edge.
2328 mo_check_promises(read, true);
2331 return haveResolved;
2335 * Compute the set of promises that could potentially be satisfied by this
2336 * action. Note that the set computation actually appears in the Node, not in
2338 * @param curr The ModelAction that may satisfy promises
2340 void ModelChecker::compute_promises(ModelAction *curr)
2342 for (unsigned int i = 0; i < promises->size(); i++) {
2343 Promise *promise = (*promises)[i];
2344 const ModelAction *act = promise->get_action();
// A promise is satisfiable by curr only if the promised read does not
// already happen-before / synchronize-with / share a thread with curr,
// and curr writes the promised value at the promised location.
2345 if (!act->happens_before(curr) &&
2347 !act->could_synchronize_with(curr) &&
2348 !act->same_thread(curr) &&
2349 act->get_location() == curr->get_location() &&
2350 promise->get_value() == curr->get_value()) {
// Record the candidate in curr's Node; RMW-ness of the promised
// read is tracked so backtracking can handle it specially.
2351 curr->get_node()->set_promise(i, act->is_rmw());
2356 /** Checks promises in response to change in ClockVector Threads. */
2357 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2359 for (unsigned int i = 0; i < promises->size(); i++) {
2360 Promise *promise = (*promises)[i];
2361 const ModelAction *act = promise->get_action();
// Only react to promises that became newly synchronized by this merge:
// not synchronized under old_cv (or no old_cv), but synchronized now.
2362 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2363 merge_cv->synchronized_since(act)) {
// Thread `tid` can no longer satisfy this promise; if it was the
// last candidate thread, the promise (and execution) has failed.
2364 if (promise->eliminate_thread(tid)) {
2365 //Promise has failed
2366 priv->failed_promise = true;
// Scan all outstanding promises after a thread becomes disabled; any
// promise left with no thread able to satisfy it marks the execution
// infeasible via priv->failed_promise.
2373 void ModelChecker::check_promises_thread_disabled()
2375 for (unsigned int i = 0; i < promises->size(); i++) {
2376 Promise *promise = (*promises)[i];
2377 if (promise->has_failed()) {
2378 priv->failed_promise = true;
2385 * @brief Checks promises in response to addition to modification order for
2388 * We test whether threads are still available for satisfying promises after an
2389 * addition to our modification order constraints. Those that are unavailable
2390 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2391 * that promise has failed.
2393 * @param act The ModelAction which updated the modification order
2394 * @param is_read_check Should be true if act is a read and we must check for
2395 * updates to the store from which it read (there is a distinction here for
2396 * RMW's, which are both a load and a store)
2398 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
// For a read check we reason about the store the read observed;
// otherwise `act` is itself the write of interest.
2400 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2402 for (unsigned int i = 0; i < promises->size(); i++) {
2403 Promise *promise = (*promises)[i];
2404 const ModelAction *pread = promise->get_action();
2406 // Is this promise on the same location?
2407 if (!pread->same_var(write))
// If the promised read happens-before act, the new mo edge may have
// directly invalidated the promise.
2410 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2411 priv->failed_promise = true;
2415 // Don't do any lookups twice for the same thread
2416 if (!promise->thread_is_available(act->get_tid()))
// Reachability in the mo graph means `write` is mo-ordered relative
// to the promise; re-check whether the promise can still be kept.
2419 if (mo_graph->checkReachable(promise, write)) {
2420 if (mo_graph->checkPromise(write, promise)) {
2421 priv->failed_promise = true;
2429 * Compute the set of writes that may break the current pending release
2430 * sequence. This information is extracted from previous release sequence
2433 * @param curr The current ModelAction. Must be a release sequence fixup
2436 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2438 if (pending_rel_seqs->empty())
// Only the most recent pending release sequence is considered; each of
// its candidate writes becomes a potential "break" choice in the Node.
2441 struct release_seq *pending = pending_rel_seqs->back();
2442 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2443 const ModelAction *write = pending->writes[i];
2444 curr->get_node()->add_relseq_break(write);
2447 /* NULL means don't break the sequence; just synchronize */
2448 curr->get_node()->add_relseq_break(NULL);
2452 * Build up an initial set of all past writes that this 'read' action may read
2453 * from. This set is determined by the clock vector's "happens before"
2455 * @param curr is the current ModelAction that we are exploring; it must be a
2458 void ModelChecker::build_reads_from_past(ModelAction *curr)
2460 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2462 ASSERT(curr->is_read());
2464 ModelAction *last_sc_write = NULL;
// A seq_cst read may not read from any write ordered before the most
// recent seq_cst write to the same location; compute that bound up front.
2466 if (curr->is_seqcst())
2467 last_sc_write = get_last_seq_cst_write(curr);
2469 /* Iterate over all threads */
2470 for (i = 0; i < thrd_lists->size(); i++) {
2471 /* Iterate over actions in thread, starting from most recent */
2472 action_list_t *list = &(*thrd_lists)[i];
2473 action_list_t::reverse_iterator rit;
2474 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2475 ModelAction *act = *rit;
2477 /* Only consider 'write' actions */
2478 if (!act->is_write() || act == curr)
2481 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2482 bool allow_read = true;
// Disallow: (a) seq_cst writes (or writes that happen-before the last
// seq_cst write) other than the last seq_cst write itself, when curr is
// seq_cst; (b) writes a sleeping thread is not allowed to observe.
2484 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2486 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2490 /* Only add feasible reads */
// Speculatively apply the mo constraints this read-from would impose,
// keep the candidate only if the graph stays feasible, then roll back.
2491 mo_graph->startChanges();
2492 r_modification_order(curr, act);
2493 if (!is_infeasible())
2494 curr->get_node()->add_read_from(act);
2495 mo_graph->rollbackChanges();
2498 /* Include at most one act per-thread that "happens before" curr */
2499 if (act->happens_before(curr))
2503 /* We may find no valid may-read-from only if the execution is doomed */
2504 if (!curr->get_node()->get_read_from_size()) {
2505 priv->no_valid_reads = true;
2509 if (DBG_ENABLED()) {
2510 model_print("Reached read action:\n");
2512 model_print("Printing may_read_from\n");
2513 curr->get_node()->print_may_read_from();
2514 model_print("End printing may_read_from\n");
// Decide whether a read performed by a thread on the sleep set may
// observe `write`. Walks the RMW reads-from chain backwards looking for a
// release write made while the reader was asleep.
2518 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2521 /* UNINIT actions don't have a Node, and they never sleep */
2522 if (write->is_uninitialized())
2524 Node *prevnode = write->get_node()->get_parent();
2526 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2527 if (write->is_release() && thread_sleep)
// Non-RMW writes terminate the chain; for an RMW, continue with the
// write it read from (stopping if that chain is not yet established).
2529 if (!write->is_rmw()) {
2532 if (write->get_reads_from() == NULL)
2534 write = write->get_reads_from();
2539 * @brief Create a new action representing an uninitialized atomic
2540 * @param location The memory location of the atomic object
2541 * @return A pointer to a new ModelAction
2543 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
// Allocate from snapshot memory and construct via placement-new so the
// action participates in snapshot/rollback like other checker state.
2545 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2546 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
// Uninitialized actions get an empty clock vector (no parent action).
2547 act->create_cv(NULL);
// Print a trace (action list) between separator rules, accumulating a
// simple rolling hash of the actions so two traces can be compared by a
// single printed value.
2551 static void print_list(action_list_t *list)
2553 action_list_t::iterator it;
2555 model_print("---------------------------------------------------------------------\n");
2557 unsigned int hash = 0;
2559 for (it = list->begin(); it != list->end(); it++) {
2561 hash = hash^(hash<<3)^((*it)->hash());
2563 model_print("HASH %u\n", hash);
2564 model_print("---------------------------------------------------------------------\n");
2567 #if SUPPORT_MOD_ORDER_DUMP
// Dump the modification-order graph plus reads-from (red) and
// sequenced-before (blue) edges for the current trace as a Graphviz
// "dot" file named "<filename>.dot".
2568 void ModelChecker::dumpGraph(char *filename) const
2571 sprintf(buffer, "%s.dot", filename);
2572 FILE *file = fopen(buffer, "w");
2573 fprintf(file, "digraph %s {\n", filename);
2574 mo_graph->dumpNodes(file);
// thread_array[t] remembers the previous action of thread t so an "sb"
// edge can be drawn from it to the thread's next action.
2575 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2577 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2578 ModelAction *action = *it;
2579 if (action->is_read()) {
2580 fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2581 if (action->get_reads_from() != NULL)
2582 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2584 if (thread_array[action->get_tid()] != NULL) {
2585 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2588 thread_array[action->get_tid()] = action;
2590 fprintf(file, "}\n");
2591 model_free(thread_array);
// NOTE(review): fopen() result is not checked and fclose() is not visible
// in this excerpt — confirm the file is closed in the elided lines.
2596 /** @brief Prints an execution trace summary. */
2597 void ModelChecker::print_summary() const
2599 #if SUPPORT_MOD_ORDER_DUMP
// When mod-order dumping is compiled in, emit per-execution graph files
// numbered by the running execution count.
2600 char buffername[100];
2601 sprintf(buffername, "exec%04u", stats.num_total);
2602 mo_graph->dumpGraphToFile(buffername);
2603 sprintf(buffername, "graph%04u", stats.num_total);
2604 dumpGraph(buffername);
2607 model_print("Execution %d:", stats.num_total);
2608 if (isfeasibleprefix())
2611 print_infeasibility(" INFEASIBLE");
2612 print_list(action_trace);
2617 * Add a Thread to the system for the first time. Should only be called once
2619 * @param t The Thread to add
2621 void ModelChecker::add_thread(Thread *t)
// Register in the id->Thread map and hand the thread to the scheduler.
2623 thread_map->put(id_to_int(t->get_id()), t);
2624 scheduler->add_thread(t);
2628 * Removes a thread from the scheduler.
2629 * @param t The Thread to remove.
2631 void ModelChecker::remove_thread(Thread *t)
2633 scheduler->remove_thread(t);
2637 * @brief Get a Thread reference by its ID
2638 * @param tid The Thread's ID
2639 * @return A Thread reference
2641 Thread * ModelChecker::get_thread(thread_id_t tid) const
2643 return thread_map->get(id_to_int(tid));
2647 * @brief Get a reference to the Thread in which a ModelAction was executed
2648 * @param act The ModelAction
2649 * @return A Thread reference
2651 Thread * ModelChecker::get_thread(const ModelAction *act) const
// Convenience overload: resolve via the action's recorded thread ID.
2653 return get_thread(act->get_tid());
2657 * @brief Check if a Thread is currently enabled
2658 * @param t The Thread to check
2659 * @return True if the Thread is currently enabled
2661 bool ModelChecker::is_enabled(Thread *t) const
2663 return scheduler->is_enabled(t);
2667 * @brief Check if a Thread is currently enabled
2668 * @param tid The ID of the Thread to check
2669 * @return True if the Thread is currently enabled
2671 bool ModelChecker::is_enabled(thread_id_t tid) const
2673 return scheduler->is_enabled(tid);
2677 * Switch from a model-checker context to a user-thread context. This is the
2678 * complement of ModelChecker::switch_to_master and must be called from the
2679 * model-checker context
2681 * @param thread The user-thread to switch to
2683 void ModelChecker::switch_from_master(Thread *thread)
// Mark the target as the running thread, then perform the actual context
// switch away from the system (master) context.
2685 scheduler->set_current_thread(thread);
2686 Thread::swap(&system_context, thread);
2690 * Switch from a user-context to the "master thread" context (a.k.a. system
2691 * context). This switch is made with the intention of exploring a particular
2692 * model-checking action (described by a ModelAction object). Must be called
2693 * from a user-thread context.
2695 * @param act The current action that will be explored. May be NULL only if
2696 * trace is exiting via an assertion (see ModelChecker::set_assert and
2697 * ModelChecker::has_asserted).
2698 * @return Return the value returned by the current action
2700 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2703 Thread *old = thread_current();
// A thread may have at most one action pending at a time; stash the new
// one for the master loop to consume.
2704 ASSERT(!old->get_pending());
2705 old->set_pending(act);
2706 if (Thread::swap(old, &system_context) < 0) {
2707 perror("swap threads");
// Execution resumes here after the master has processed the action; the
// action's result was stored on the thread by the checker.
2710 return old->get_return_value();
2714 * Takes the next step in the execution, if possible.
2715 * @param curr The current step to take
2716 * @return Returns the next Thread to run, if any; NULL if this execution
2719 Thread * ModelChecker::take_step(ModelAction *curr)
2721 Thread *curr_thrd = get_thread(curr)
2722 ASSERT(curr_thrd->get_state() == THREAD_READY);
// Process the action; check_current_action may substitute an equivalent
// existing action, so use its return value from here on.
2724 curr = check_current_action(curr);
2726 /* Infeasible -> don't take any more steps */
2727 if (is_infeasible())
2729 else if (isfeasibleprefix() && have_bug_reports()) {
// Stop the execution once the (optional) step bound is exceeded.
2734 if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2737 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2738 scheduler->remove_thread(curr_thrd);
2740 Thread *next_thrd = get_next_thread(curr);
2742 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2743 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2748 /** Wrapper to run the user's main function, with appropriate arguments */
2749 void user_main_wrapper(void *)
2751 user_main(model->params.argc, model->params.argv);
2754 /** @brief Run ModelChecker for the user program */
2755 void ModelChecker::run()
// Create the initial user thread that will execute user_main().
2759 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2764 * Stash next pending action(s) for thread(s). There
2765 * should only need to stash one thread's action--the
2766 * thread which just took a step--plus the first step
2767 * for any newly-created thread
2769 for (unsigned int i = 0; i < get_num_threads(); i++) {
2770 thread_id_t tid = int_to_id(i);
2771 Thread *thr = get_thread(tid);
// Run each live, action-less user thread until it publishes its next
// action via switch_to_master() (which sets the pending action).
2772 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2773 switch_from_master(thr);
2777 /* Catch assertions from prior take_step or from
2778 * between-ModelAction bugs (e.g., data races) */
2782 /* Consume the next action for a Thread */
2783 ModelAction *curr = t->get_pending();
2784 t->set_pending(NULL);
2785 t = take_step(curr);
2786 } while (t && !t->is_model_thread());
2789 * Launch end-of-execution release sequence fixups only when
2790 * the execution is otherwise feasible AND there are:
2792 * (1) pending release sequences
2793 * (2) pending assertions that could be invalidated by a change
2794 * in clock vectors (i.e., data races)
2795 * (3) no pending promises
2797 while (!pending_rel_seqs->empty() &&
2798 is_feasible_prefix_ignore_relseq() &&
2799 !unrealizedraces.empty()) {
// NOTE(review): "seuqence" below is a typo in a runtime message
// ("sequence"); fixing it requires a code (string) change.
2800 model_print("*** WARNING: release sequence fixup action "
2801 "(%zu pending release seuqence(s)) ***\n",
2802 pending_rel_seqs->size());
// Inject a synthetic model-thread action that forces resolution of the
// pending release sequences before ending the execution.
2803 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2804 std::memory_order_seq_cst, NULL, VALUE_NONE,
// Outer loop: repeat whole executions until the search space is
// exhausted (next_execution() returns false).
2808 } while (next_execution());