9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
/* NOTE(review): elided listing -- additional members and the closing brace
 * of this struct are not visible in this chunk. */
22 * Structure for holding small ModelChecker members that should be snapshotted
24 struct model_snapshot_members {
/* The action handed off from a user thread to the checker (see set_current_action) */
25 ModelAction *current_action;
/* Next thread ID to hand out; doubles as the count of threads created so far */
26 unsigned int next_thread_id;
/* Highest sequence number assigned in the current execution */
27 modelclock_t used_sequence_numbers;
/* Cached latest backtracking point; consumed by get_next_backtrack() */
29 ModelAction *next_backtrack;
/* NOTE(review): elided listing -- some initializer-list entries, braces and
 * blank lines from the original file are missing between the numbered lines. */
32 /** @brief Constructor */
33 ModelChecker::ModelChecker(struct model_params params) :
34 /* Initialize default scheduler */
36 scheduler(new Scheduler()),
38 num_feasible_executions(0),
40 earliest_diverge(NULL),
41 action_trace(new action_list_t()),
42 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, plus waiter queues for mutexes and condvars */
43 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
44 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
45 condvar_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
46 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
/* These vectors live on the snapshotting heap (SnapshotAlloc) so they are
 * rolled back together with the user program state */
47 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51 node_stack(new NodeStack()),
52 mo_graph(new CycleGraph()),
/* Per-execution infeasibility flags; cleared in reset_to_initial_state() */
53 failed_promise(false),
54 too_many_reads(false),
56 bad_synchronization(false)
58 /* Allocate this "size" on the snapshotting heap */
59 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
60 /* First thread created will have id INITIAL_THREAD_ID */
61 priv->next_thread_id = INITIAL_THREAD_ID;
63 /* Initialize a model-checker thread, for special ModelActions */
64 model_thread = new Thread(get_next_id());
65 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
/* NOTE(review): elided listing -- several delete statements and braces from
 * the original destructor are missing between the numbered lines. */
68 /** @brief Destructor */
69 ModelChecker::~ModelChecker()
/* Free every Thread object created during the run (including the model thread) */
71 for (unsigned int i = 0; i < get_num_threads(); i++)
72 delete thread_map->get(i);
77 delete lock_waiters_map;
78 delete condvar_waiters_map;
/* Promise objects are owned by the checker; free each before the container */
81 for (unsigned int i = 0; i < promises->size(); i++)
82 delete (*promises)[i];
85 delete pending_rel_seqs;
87 delete thrd_last_action;
94 * Restores user program to initial state and resets all model-checker data
97 void ModelChecker::reset_to_initial_state()
99 DEBUG("+++ Resetting to initial state +++\n");
/* Rewind exploration bookkeeping for the next execution */
100 node_stack->reset_execution();
/* Clear the per-execution infeasibility flags */
101 failed_promise = false;
102 too_many_reads = false;
103 bad_synchronization = false;
/* Roll the snapshotted heap/globals back to the very first snapshot */
105 snapshotObject->backTrackBeforeStep(0);
108 /** @return a thread ID for a new Thread */
109 thread_id_t ModelChecker::get_next_id()
111 return priv->next_thread_id++;
114 /** @return the number of user threads created during this execution */
115 unsigned int ModelChecker::get_num_threads() const
117 return priv->next_thread_id;
120 /** @return The currently executing Thread. */
121 Thread * ModelChecker::get_current_thread()
123 return scheduler->get_current_thread();
126 /** @return a sequence number for a new ModelAction */
127 modelclock_t ModelChecker::get_next_seq_num()
129 return ++priv->used_sequence_numbers;
132 Node * ModelChecker::get_curr_node() {
133 return node_stack->get_head();
/* NOTE(review): elided listing -- the RMW check, the "completed path" test,
 * several braces and else-branches are missing between the numbered lines. */
137 * @brief Choose the next thread to execute.
139 * This function chooses the next thread that should execute. It can force the
140 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
141 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
142 * The model-checker may have no preference regarding the next thread (i.e.,
143 * when exploring a new execution ordering), in which case this will return
145 * @param curr The current ModelAction. This action might guide the choice of
147 * @return The next thread to run. If the model-checker has no preference, NULL.
149 Thread * ModelChecker::get_next_thread(ModelAction *curr)
154 /* Do not split atomic actions. */
156 return thread_current();
157 /* The THREAD_CREATE action points to the created Thread */
158 else if (curr->get_type() == THREAD_CREATE)
159 return (Thread *)curr->get_location();
162 /* Have we completed exploring the preselected path? */
166 /* Else, we are trying to replay an execution */
167 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence point seen since the last feasible execution */
169 if (next == diverge) {
170 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
171 earliest_diverge=diverge;
173 Node *nextnode = next->get_node();
174 Node *prevnode = nextnode->get_parent();
175 scheduler->update_sleep_set(prevnode);
177 /* Reached divergence point */
/* Try the exploration alternatives in priority order: misc index, promise
 * set, read-from choice, future value, release-sequence break; each pops
 * the rest of the stack so the node is re-explored with the new choice. */
178 if (nextnode->increment_misc()) {
179 /* The next node will try to satisfy a different misc_index values. */
180 tid = next->get_tid();
181 node_stack->pop_restofstack(2);
182 } else if (nextnode->increment_promise()) {
183 /* The next node will try to satisfy a different set of promises. */
184 tid = next->get_tid();
185 node_stack->pop_restofstack(2);
186 } else if (nextnode->increment_read_from()) {
187 /* The next node will read from a different value. */
188 tid = next->get_tid();
189 node_stack->pop_restofstack(2);
190 } else if (nextnode->increment_future_value()) {
191 /* The next node will try to read from a different future value. */
192 tid = next->get_tid();
193 node_stack->pop_restofstack(2);
194 } else if (nextnode->increment_relseq_break()) {
195 /* The next node will try to resolve a release sequence differently */
196 tid = next->get_tid();
197 node_stack->pop_restofstack(2);
/* All alternatives at this node exhausted: backtrack to the parent node */
199 /* Make a different thread execute for next step */
200 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
201 tid = prevnode->get_next_backtrack();
202 /* Make sure the backtracked thread isn't sleeping. */
203 node_stack->pop_restofstack(1);
204 if (diverge==earliest_diverge) {
205 earliest_diverge=prevnode->get_action();
208 /* The correct sleep set is in the parent node. */
211 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: simply replay the recorded thread choice */
215 tid = next->get_tid();
217 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
218 ASSERT(tid != THREAD_ID_T_NONE);
219 return thread_map->get(id_to_int(tid));
223 * We need to know what the next actions of all threads in the sleep
224 * set will be. This method computes them and stores the actions at
225 * the corresponding thread object's pending action.
228 void ModelChecker::execute_sleep_set() {
229 for(unsigned int i=0;i<get_num_threads();i++) {
230 thread_id_t tid=int_to_id(i);
231 Thread *thr=get_thread(tid);
/* Only probe sleeping threads that have not yet published a pending action */
232 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
233 thr->get_pending() == NULL ) {
234 thr->set_state(THREAD_RUNNING);
235 scheduler->next_thread(thr);
/* Context-switch into the sleeping thread just long enough for it to hand
 * its next action to the checker via set_current_action() */
236 Thread::swap(&system_context, thr);
/* Tag the action so it is recognized as a sleep-set probe, then stash it */
237 priv->current_action->set_sleep_flag();
238 thr->set_pending(priv->current_action);
241 priv->current_action = NULL;
/**
 * @brief Remove threads from the sleep set whose pending action could
 * synchronize with the current action.
 * @param curr The current action being processed
 */
244 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
245 for(unsigned int i=0;i<get_num_threads();i++) {
246 thread_id_t tid=int_to_id(i);
247 Thread *thr=get_thread(tid);
248 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
249 ModelAction *pending_act=thr->get_pending();
/* Skip the read half of an RMW; wake only if the stashed action could
 * synchronize with curr */
250 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
251 //Remove this thread from sleep set
252 scheduler->remove_sleep(thr);
/* NOTE(review): elided listing -- the early-return for an enabled thread
 * (original lines ~270-271) is missing between the numbered lines. */
259 * Check if we are in a deadlock. Should only be called at the end of an
260 * execution, although it should not give false positives in the middle of an
261 * execution (there should be some ENABLED thread).
263 * @return True if program is in a deadlock; false otherwise
265 bool ModelChecker::is_deadlocked() const
267 bool blocking_threads = false;
268 for (unsigned int i = 0; i < get_num_threads(); i++) {
269 thread_id_t tid = int_to_id(i);
272 Thread *t = get_thread(tid);
/* A non-model thread with a stashed pending action is blocked waiting */
273 if (!t->is_model_thread() && t->get_pending())
274 blocking_threads = true;
276 return blocking_threads;
/* NOTE(review): elided listing -- the deadlock check guarding the printf,
 * trace-print/stat lines, the "no more executions" return and braces are
 * missing between the numbered lines. */
280 * Queries the model-checker for more executions to explore and, if one
281 * exists, resets the model-checker state to execute a new execution.
283 * @return If there are more executions to explore, return true. Otherwise,
286 bool ModelChecker::next_execution()
293 printf("ERROR: DEADLOCK\n");
294 if (isfinalfeasible()) {
/* Report where this feasible execution first diverged from the last one */
295 printf("Earliest divergence point since last feasible execution:\n");
296 if (earliest_diverge)
297 earliest_diverge->print();
299 printf("(Not set)\n");
301 earliest_diverge = NULL;
302 num_feasible_executions++;
305 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
306 pending_rel_seqs->size());
/* Dump the trace when feasible, over the step bound, or when debugging */
309 if (isfinalfeasible() || (params.bound != 0 && priv->used_sequence_numbers > params.bound ) || DBG_ENABLED() ) {
/* No remaining backtracking point means exploration is complete */
314 if ((diverge = get_next_backtrack()) == NULL)
318 printf("Next execution will diverge at:\n");
322 reset_to_initial_state();
/* NOTE(review): elided listing -- some case labels (e.g. the read/write case
 * and the wait case), the "return prev" lines, break/default and braces are
 * missing between the numbered lines. */
/**
 * @brief Find the most recent action that conflicts with @a act
 * @param act The action to find a conflict for
 */
326 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
328 switch (act->get_type()) {
/* (case label elided) reads/writes: last action that could synchronize */
332 /* linear search: from most recent to oldest */
333 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
334 action_list_t::reverse_iterator rit;
335 for (rit = list->rbegin(); rit != list->rend(); rit++) {
336 ModelAction *prev = *rit;
337 if (prev->could_synchronize_with(act))
343 case ATOMIC_TRYLOCK: {
344 /* linear search: from most recent to oldest */
345 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
346 action_list_t::reverse_iterator rit;
347 for (rit = list->rbegin(); rit != list->rend(); rit++) {
348 ModelAction *prev = *rit;
/* lock/trylock on the same mutex in another thread conflicts */
349 if (act->is_conflicting_lock(prev))
354 case ATOMIC_UNLOCK: {
355 /* linear search: from most recent to oldest */
356 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
357 action_list_t::reverse_iterator rit;
358 for (rit = list->rbegin(); rit != list->rend(); rit++) {
359 ModelAction *prev = *rit;
/* an unlock conflicts with another thread's failed trylock */
360 if (!act->same_thread(prev)&&prev->is_failed_trylock())
366 /* linear search: from most recent to oldest */
367 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
368 action_list_t::reverse_iterator rit;
369 for (rit = list->rbegin(); rit != list->rend(); rit++) {
370 ModelAction *prev = *rit;
371 if (!act->same_thread(prev)&&prev->is_failed_trylock())
373 if (!act->same_thread(prev)&&prev->is_notify())
379 case ATOMIC_NOTIFY_ALL:
380 case ATOMIC_NOTIFY_ONE: {
381 /* linear search: from most recent to oldest */
382 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
383 action_list_t::reverse_iterator rit;
384 for (rit = list->rbegin(); rit != list->rend(); rit++) {
385 ModelAction *prev = *rit;
/* a notify conflicts with another thread's wait on the same condvar */
386 if (!act->same_thread(prev)&&prev->is_wait())
/* NOTE(review): elided listing -- the NULL-conflict early return, the
 * else-branch setting low_tid=0, several continue statements and braces are
 * missing between the numbered lines. */
397 /** This method finds backtracking points where we should try to
398 * reorder the parameter ModelAction against.
400 * @param the ModelAction to find backtracking points for.
402 void ModelChecker::set_backtracking(ModelAction *act)
404 Thread *t = get_thread(act);
405 ModelAction * prev = get_last_conflict(act);
/* Backtracking sets live on the parent node of the conflicting action */
409 Node * node = prev->get_node()->get_parent();
411 int low_tid, high_tid;
/* If act's thread was enabled at the conflict, only that thread needs a
 * backtrack point; otherwise (elided else) scan every thread */
412 if (node->is_enabled(t)) {
413 low_tid = id_to_int(act->get_tid());
414 high_tid = low_tid+1;
417 high_tid = get_num_threads();
420 for(int i = low_tid; i < high_tid; i++) {
421 thread_id_t tid = int_to_id(i);
423 /* Make sure this thread can be enabled here. */
424 if (i >= node->get_num_threads())
427 /* Don't backtrack into a point where the thread is disabled or sleeping. */
428 if (node->enabled_status(tid)!=THREAD_ENABLED)
431 /* Check if this has been explored already */
432 if (node->has_been_explored(tid))
435 /* See if fairness allows */
436 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
438 for(int t=0;t<node->get_num_threads();t++) {
439 thread_id_t tother=int_to_id(t);
/* another enabled, prioritized thread exists -> skip this tid (elided) */
440 if (node->is_enabled(tother) && node->has_priority(tother)) {
448 /* Cache the latest backtracking point */
449 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
450 priv->next_backtrack = prev;
452 /* If this is a new backtracking point, mark the tree */
453 if (!node->set_backtrack(tid))
455 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
456 id_to_int(prev->get_tid()),
457 id_to_int(t->get_id()));
466 * Returns last backtracking point. The model checker will explore a different
467 * path for this point in the next execution.
468 * @return The ModelAction at which the next execution should diverge.
470 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: it is handed out once, then cleared.
 * (The trailing "return next;" line is elided from this listing.) */
472 ModelAction *next = priv->next_backtrack;
473 priv->next_backtrack = NULL;
/* NOTE(review): elided listing -- return statements, closing braces and a
 * few lines in each branch are missing between the numbered lines. */
478 * Processes a read or rmw model action.
479 * @param curr is the read model action to process.
480 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
481 * @return True if processing this read updates the mo_graph.
483 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
485 uint64_t value = VALUE_NONE;
486 bool updated = false;
/* The node records which write this read should observe on this exploration */
488 const ModelAction *reads_from = curr->get_node()->get_read_from();
489 if (reads_from != NULL) {
/* Stage mo_graph edges so they can be rolled back if infeasible */
490 mo_graph->startChanges();
492 value = reads_from->get_value();
493 bool r_status = false;
/* Only the first half of an RMW re-checks recency and mod order */
495 if (!second_part_of_rmw) {
496 check_recency(curr, reads_from);
497 r_status = r_modification_order(curr, reads_from);
/* Infeasible choice with alternatives left: roll back and retry */
501 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
502 mo_graph->rollbackChanges();
503 too_many_reads = false;
507 curr->read_from(reads_from);
508 mo_graph->commitChanges();
509 mo_check_promises(curr->get_tid(), reads_from);
512 } else if (!second_part_of_rmw) {
513 /* Read from future value */
514 value = curr->get_node()->get_future_value();
515 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
516 curr->read_from(NULL);
/* Record a Promise: some later write must supply this value in time */
517 Promise *valuepromise = new Promise(curr, value, expiration);
518 promises->push_back(valuepromise);
520 get_thread(curr)->set_return_value(value);
/* NOTE(review): elided listing -- several case labels (e.g. ATOMIC_LOCK,
 * ATOMIC_WAIT), break/return statements and braces are missing between the
 * numbered lines. */
526 * Processes a lock, trylock, or unlock model action. @param curr is
527 * the read model action to process.
529 * The try lock operation checks whether the lock is taken. If not,
530 * it falls to the normal lock operation case. If so, it returns
533 * The lock operation has already been checked that it is enabled, so
534 * it just grabs the lock and synchronizes with the previous unlock.
536 * The unlock operation has to re-enable all of the threads that are
537 * waiting on the lock.
539 * @return True if synchronization was updated; false otherwise
541 bool ModelChecker::process_mutex(ModelAction *curr) {
542 std::mutex *mutex=NULL;
543 struct std::mutex_state *state=NULL;
/* For lock ops the mutex is the location; for wait it is passed as the value */
545 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
546 mutex = (std::mutex *)curr->get_location();
547 state = mutex->get_state();
548 } else if(curr->is_wait()) {
549 mutex = (std::mutex *)curr->get_value();
550 state = mutex->get_state();
553 switch (curr->get_type()) {
554 case ATOMIC_TRYLOCK: {
555 bool success = !state->islocked;
556 curr->set_try_lock(success);
/* failed trylock returns 0 to the caller; success falls into lock */
558 get_thread(curr)->set_return_value(0);
561 get_thread(curr)->set_return_value(1);
563 //otherwise fall into the lock case
/* Guard against using a mutex before its initialization is visible */
565 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
566 printf("Lock access before initialization\n");
569 state->islocked = true;
570 ModelAction *unlock = get_last_unlock(curr);
571 //synchronize with the previous unlock statement
572 if (unlock != NULL) {
573 curr->synchronize_with(unlock);
578 case ATOMIC_UNLOCK: {
580 state->islocked = false;
581 //wake up the other threads
582 action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
583 //activate all the waiting threads
584 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
585 scheduler->wake(get_thread(*rit));
/* (case label elided) wait: release the mutex and wake lock-waiters */
592 state->islocked = false;
593 //wake up the other threads
594 action_list_t *waiters = lock_waiters_map->get_safe_ptr((void *) curr->get_value());
595 //activate all the waiting threads
596 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
597 scheduler->wake(get_thread(*rit));
600 //check whether we should go to sleep or not...simulate spurious failures
601 if (curr->get_node()->get_misc()==0) {
602 condvar_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
/* misc==0 -> really sleep on the condvar; otherwise spurious wakeup */
604 scheduler->sleep(get_current_thread());
608 case ATOMIC_NOTIFY_ALL: {
609 action_list_t *waiters = condvar_waiters_map->get_safe_ptr(curr->get_location());
610 //activate all the waiting threads
611 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
612 scheduler->wake(get_thread(*rit));
617 case ATOMIC_NOTIFY_ONE: {
618 action_list_t *waiters = condvar_waiters_map->get_safe_ptr(curr->get_location());
/* The node's misc index picks WHICH waiter wakes, so all choices are explored */
619 int wakeupthread=curr->get_node()->get_misc();
620 action_list_t::iterator it = waiters->begin();
621 advance(it, wakeupthread);
622 scheduler->wake(get_thread(*it));
/* NOTE(review): elided listing -- braces/blank lines are missing between the
 * numbered lines. */
634 * Process a write ModelAction
635 * @param curr The ModelAction to process
636 * @return True if the mo_graph was updated or promises were resolved
638 bool ModelChecker::process_write(ModelAction *curr)
640 bool updated_mod_order = w_modification_order(curr);
641 bool updated_promises = resolve_promises(curr);
/* Only revisit deferred future values once no promises remain outstanding */
643 if (promises->size() == 0) {
644 for (unsigned int i = 0; i < futurevalues->size(); i++) {
645 struct PendingFutureValue pfv = (*futurevalues)[i];
646 //Do more ambitious checks now that mo is more complete
/* Adding a future value may create a new (latest) backtracking point */
647 if (mo_may_allow(pfv.writer, pfv.act)&&
648 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
649 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
650 priv->next_backtrack = pfv.act;
652 futurevalues->resize(0);
655 mo_graph->commitChanges();
656 mo_check_promises(curr->get_tid(), curr);
658 get_thread(curr)->set_return_value(VALUE_NONE);
659 return updated_mod_order || updated_promises;
/* NOTE(review): elided listing -- some case labels (e.g. THREAD_JOIN),
 * break statements, the THREAD_FINISH completion call and braces are missing
 * between the numbered lines. */
663 * @brief Process the current action for thread-related activity
665 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
666 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
667 * synchronization, etc. This function is a no-op for non-THREAD actions
668 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
670 * @param curr The current action
671 * @return True if synchronization was updated or a thread completed
673 bool ModelChecker::process_thread_action(ModelAction *curr)
675 bool updated = false;
677 switch (curr->get_type()) {
678 case THREAD_CREATE: {
/* THREAD_CREATE carries the created Thread in its location field */
679 Thread *th = (Thread *)curr->get_location();
680 th->set_creation(curr);
/* (case label elided) join: synchronize with the finished thread's last action */
684 Thread *blocking = (Thread *)curr->get_location();
685 ModelAction *act = get_last_action(blocking->get_id());
686 curr->synchronize_with(act);
687 updated = true; /* trigger rel-seq checks */
690 case THREAD_FINISH: {
691 Thread *th = get_thread(curr);
/* Wake every thread blocked joining on this one */
692 while (!th->wait_list_empty()) {
693 ModelAction *act = th->pop_wait_list();
694 scheduler->wake(get_thread(act));
697 updated = true; /* trigger rel-seq checks */
/* (case label elided) start: check promises against the new thread's clock */
701 check_promises(curr->get_tid(), NULL, curr->get_cv());
/* NOTE(review): elided listing -- the branch structure around "write == NULL"
 * (synchronize vs. break), several braces and blank lines are missing
 * between the numbered lines. */
712 * @brief Process the current action for release sequence fixup activity
714 * Performs model-checker release sequence fixups for the current action,
715 * forcing a single pending release sequence to break (with a given, potential
716 * "loose" write) or to complete (i.e., synchronize). If a pending release
717 * sequence forms a complete release sequence, then we must perform the fixup
718 * synchronization, mo_graph additions, etc.
720 * @param curr The current action; must be a release sequence fixup action
721 * @param work_queue The work queue to which to add work items as they are
724 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The node's relseq_break choice selects which way to resolve this sequence */
726 const ModelAction *write = curr->get_node()->get_relseq_break();
727 struct release_seq *sequence = pending_rel_seqs->back();
728 pending_rel_seqs->pop_back();
730 ModelAction *acquire = sequence->acquire;
731 const ModelAction *rf = sequence->rf;
732 const ModelAction *release = sequence->release;
736 ASSERT(release->same_thread(rf));
740 * @todo Forcing a synchronization requires that we set
741 * modification order constraints. For instance, we can't allow
742 * a fixup sequence in which two separate read-acquire
743 * operations read from the same sequence, where the first one
744 * synchronizes and the other doesn't. Essentially, we can't
745 * allow any writes to insert themselves between 'release' and
749 /* Must synchronize */
750 if (!acquire->synchronize_with(release)) {
751 set_bad_synchronization();
754 /* Re-check all pending release sequences */
755 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
756 /* Re-check act for mo_graph edges */
757 work_queue->push_back(MOEdgeWorkEntry(acquire));
759 /* propagate synchronization to later actions */
760 action_list_t::reverse_iterator rit = action_trace->rbegin();
761 for (; (*rit) != acquire; rit++) {
762 ModelAction *propagate = *rit;
763 if (acquire->happens_before(propagate)) {
764 propagate->synchronize_with(acquire);
765 /* Re-check 'propagate' for mo_graph edges */
766 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* (else branch elided) break the release sequence instead of synchronizing */
770 /* Break release sequence with new edges:
771 * release --mo--> write --mo--> rf */
772 mo_graph->addEdge(release, write);
773 mo_graph->addEdge(write, rf);
776 /* See if we have realized a data race */
777 if (checkDataRaces())
/* NOTE(review): elided listing -- the else-branch structure, the
 * "newcurr != *curr" duplicate test, return statements and braces are
 * missing between the numbered lines. */
782 * Initialize the current action by performing one or more of the following
783 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
784 * in the NodeStack, manipulating backtracking sets, allocating and
785 * initializing clock vectors, and computing the promises to fulfill.
787 * @param curr The current action, as passed from the user context; may be
788 * freed/invalidated after the execution of this function, with a different
789 * action "returned" its place (pass-by-reference)
790 * @return True if curr is a newly-explored action; false otherwise
792 bool ModelChecker::initialize_curr_action(ModelAction **curr)
794 ModelAction *newcurr;
/* Second half of an RMW: merge into the earlier RMWR action */
796 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
797 newcurr = process_rmw(*curr);
800 if (newcurr->is_rmw())
801 compute_promises(newcurr);
/* (else branch elided) ordinary action: number it and step the NodeStack */
807 (*curr)->set_seq_number(get_next_seq_num());
809 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
/* Replayed action found in the NodeStack */
811 /* First restore type and order in case of RMW operation */
812 if ((*curr)->is_rmwr())
813 newcurr->copy_typeandorder(*curr);
815 ASSERT((*curr)->get_location() == newcurr->get_location());
816 newcurr->copy_from_new(*curr);
818 /* Discard duplicate ModelAction; use action from NodeStack */
821 /* Always compute new clock vector */
822 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
825 return false; /* Action was explored previously */
/* Newly-explored action */
829 /* Always compute new clock vector */
830 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
832 * Perform one-time actions when pushing new ModelAction onto
835 if (newcurr->is_write())
836 compute_promises(newcurr);
837 else if (newcurr->is_relseq_fixup())
838 compute_relseq_breakwrites(newcurr);
839 else if (newcurr->is_wait())
840 newcurr->get_node()->set_misc_max(2);
841 else if (newcurr->is_notify_one()) {
842 newcurr->get_node()->set_misc_max(condvar_waiters_map->get_safe_ptr(newcurr->get_location())->size());
844 return true; /* This was a new ModelAction */
/* NOTE(review): elided listing -- the "return false" lines in each blocked
 * branch, the final "return true" and braces are missing between the
 * numbered lines. */
849 * @brief Check whether a model action is enabled.
851 * Checks whether a lock or join operation would be successful (i.e., is the
852 * lock already locked, or is the joined thread already complete). If not, put
853 * the action in a waiter list.
855 * @param curr is the ModelAction to check whether it is enabled.
856 * @return a bool that indicates whether the action is enabled.
858 bool ModelChecker::check_action_enabled(ModelAction *curr) {
859 if (curr->is_lock()) {
860 std::mutex * lock = (std::mutex *)curr->get_location();
861 struct std::mutex_state * state = lock->get_state();
862 if (state->islocked) {
863 //Stick the action in the appropriate waiting queue
864 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
867 } else if (curr->get_type() == THREAD_JOIN) {
868 Thread *blocking = (Thread *)curr->get_location();
/* Can't join a live thread: queue the join on the target's wait list */
869 if (!blocking->is_complete()) {
870 blocking->push_wait_list(curr);
879 * Stores the ModelAction for the current thread action. Call this
880 * immediately before switching from user- to system-context to pass
882 * @param act The ModelAction created by the user-thread action
884 void ModelChecker::set_current_action(ModelAction *act) {
885 priv->current_action = act;
/* NOTE(review): elided listing -- ASSERT lines, some case/break statements,
 * the update/update_all dispatch and braces are missing between the numbered
 * lines. */
889 * This is the heart of the model checker routine. It performs model-checking
890 * actions corresponding to a given "current action." Among other processes, it
891 * calculates reads-from relationships, updates synchronization clock vectors,
892 * forms a memory_order constraints graph, and handles replay/backtrack
893 * execution when running permutations of previously-observed executions.
895 * @param curr The current action to process
896 * @return The next Thread that must be executed. May be NULL if ModelChecker
897 * makes no choice (e.g., according to replay execution, combining RMW actions,
900 Thread * ModelChecker::check_current_action(ModelAction *curr)
903 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
905 if (!check_action_enabled(curr)) {
906 /* Make the execution look like we chose to run this action
907 * much later, when a lock/join can succeed */
908 get_current_thread()->set_pending(curr);
909 scheduler->sleep(get_current_thread());
910 return get_next_thread(NULL);
913 bool newly_explored = initialize_curr_action(&curr);
915 wake_up_sleeping_actions(curr);
917 /* Add the action to lists before any other model-checking tasks */
918 if (!second_part_of_rmw)
919 add_action_to_lists(curr);
921 /* Build may_read_from set for newly-created actions */
922 if (newly_explored && curr->is_read())
923 build_reads_from_past(curr);
925 /* Initialize work_queue with the "current action" work */
926 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Fixed-point loop: each work item may enqueue more work */
927 while (!work_queue.empty()) {
928 WorkQueueEntry work = work_queue.front();
929 work_queue.pop_front();
932 case WORK_CHECK_CURR_ACTION: {
933 ModelAction *act = work.action;
934 bool update = false; /* update this location's release seq's */
935 bool update_all = false; /* update all release seq's */
/* Dispatch per action category; each process_* reports whether sync or
 * mo_graph state changed, triggering release-sequence re-checks */
937 if (process_thread_action(curr))
940 if (act->is_read() && process_read(act, second_part_of_rmw))
943 if (act->is_write() && process_write(act))
946 if (act->is_mutex_op() && process_mutex(act))
949 if (act->is_relseq_fixup())
950 process_relseq_fixup(curr, &work_queue)
953 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
955 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
958 case WORK_CHECK_RELEASE_SEQ:
959 resolve_release_sequences(work.location, &work_queue);
961 case WORK_CHECK_MO_EDGES: {
962 /** @todo Complete verification of work_queue */
963 ModelAction *act = work.action;
964 bool updated = false;
966 if (act->is_read()) {
967 const ModelAction *rf = act->get_reads_from();
968 if (rf != NULL && r_modification_order(act, rf))
971 if (act->is_write()) {
972 if (w_modification_order(act))
975 mo_graph->commitChanges();
978 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* After the queue drains: record backtracking points, pick next thread */
987 check_curr_backtracking(curr);
988 set_backtracking(curr);
989 return get_next_thread(curr);
992 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
993 Node *currnode = curr->get_node();
994 Node *parnode = currnode->get_parent();
996 if ((!parnode->backtrack_empty() ||
997 !currnode->misc_empty() ||
998 !currnode->read_from_empty() ||
999 !currnode->future_value_empty() ||
1000 !currnode->promise_empty() ||
1001 !currnode->relseq_break_empty())
1002 && (!priv->next_backtrack ||
1003 *curr > *priv->next_backtrack)) {
1004 priv->next_backtrack = curr;
/* NOTE(review): elided listing -- the "return true" inside the if, the
 * trailing "return false" and closing braces are missing between the
 * numbered lines. */
/** @return true if any outstanding promise has passed its expiration clock */
1008 bool ModelChecker::promises_expired() {
1009 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1010 Promise *promise = (*promises)[promise_index];
/* Expired: no write supplied the promised value before its deadline */
1011 if (promise->get_expiration()<priv->used_sequence_numbers) {
1018 /** @return whether the current partial trace must be a prefix of a
1019 * feasible trace. */
1020 bool ModelChecker::isfeasibleprefix() {
1021 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1024 /** @return whether the current partial trace is feasible. */
1025 bool ModelChecker::isfeasible() {
1026 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1027 DEBUG("Infeasible: RMW violation\n");
1029 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
/* NOTE(review): elided listing -- the "if (failed_promise)" and
 * "if (too_many_reads)" guards for two DEBUG lines and closing braces are
 * missing between the numbered lines. */
1032 /** @return whether the current partial trace is feasible other than
1033 * multiple RMW reading from the same store. */
1034 bool ModelChecker::isfeasibleotherthanRMW() {
/* Under debug, report each infeasibility reason individually */
1035 if (DBG_ENABLED()) {
1036 if (mo_graph->checkForCycles())
1037 DEBUG("Infeasible: modification order cycles\n");
1039 DEBUG("Infeasible: failed promise\n");
1041 DEBUG("Infeasible: too many reads\n");
1042 if (bad_synchronization)
1043 DEBUG("Infeasible: bad synchronization ordering\n");
1044 if (promises_expired())
1045 DEBUG("Infeasible: promises expired\n");
/* Feasible only if every infeasibility condition is absent */
1047 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1050 /** Returns whether the current completed trace is feasible. */
1051 bool ModelChecker::isfinalfeasible() {
1052 if (DBG_ENABLED() && promises->size() != 0)
1053 DEBUG("Infeasible: unrevolved promises\n");
1055 return isfeasible() && promises->size() == 0;
/* NOTE(review): elided listing -- the closing brace of the if and the
 * trailing "return lastread;" are missing between the numbered lines. */
1058 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1059 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
/* Merge this second half into the thread's preceding RMWR action */
1060 ModelAction *lastread = get_last_action(act->get_tid());
1061 lastread->process_rmw(act);
/* A completed RMW adds an atomicity edge from its read-from write */
1062 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1063 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1064 mo_graph->commitChanges();
/* NOTE(review): elided listing -- early returns (feasibility check, curr not
 * a read), continue/break statements, the ritcopy reset and braces are
 * missing between the numbered lines. */
1070 * Checks whether a thread has read from the same write for too many times
1071 * without seeing the effects of a later write.
1074 * 1) there must a different write that we could read from that would satisfy the modification order,
1075 * 2) we must have read from the same value in excess of maxreads times, and
1076 * 3) that other write must have been in the reads_from set for maxreads times.
1078 * If so, we decide that the execution is no longer feasible.
1080 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
/* maxreads == 0 disables the recency check entirely */
1081 if (params.maxreads != 0) {
1083 if (curr->get_node()->get_read_from_size() <= 1)
1085 //Must make sure that execution is currently feasible... We could
1086 //accidentally clear by rolling back
1089 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1090 int tid = id_to_int(curr->get_tid());
1093 if ((int)thrd_lists->size() <= tid)
1095 action_list_t *list = &(*thrd_lists)[tid];
1097 action_list_t::reverse_iterator rit = list->rbegin();
1098 /* Skip past curr */
1099 for (; (*rit) != curr; rit++)
1101 /* go past curr now */
/* Remember this position so the per-write scan below can restart here */
1104 action_list_t::reverse_iterator ritcopy = rit;
1105 //See if we have enough reads from the same value
1107 for (; count < params.maxreads; rit++,count++) {
1108 if (rit==list->rend())
1110 ModelAction *act = *rit;
1111 if (!act->is_read())
1114 if (act->get_reads_from() != rf)
1116 if (act->get_node()->get_read_from_size() <= 1)
/* The last maxreads reads all saw rf; look for an alternative write */
1119 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1121 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1123 //Need a different write
1127 /* Test to see whether this is a feasible write to read from*/
/* Stage the edges, test feasibility, and always roll back */
1128 mo_graph->startChanges();
1129 r_modification_order(curr, write);
1130 bool feasiblereadfrom = isfeasible();
1131 mo_graph->rollbackChanges();
1133 if (!feasiblereadfrom)
1137 bool feasiblewrite = true;
1138 //new we need to see if this write works for everyone
/* The alternative write must have been available to all maxreads reads */
1140 for (int loop = count; loop>0; loop--,rit++) {
1141 ModelAction *act=*rit;
1142 bool foundvalue = false;
1143 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1144 if (act->get_node()->get_read_from_at(j)==write) {
1150 feasiblewrite = false;
/* A viable ignored alternative exists: flag the trace infeasible */
1154 if (feasiblewrite) {
1155 too_many_reads = true;
1163 * Updates the mo_graph with the constraints imposed from the current
1166 * Basic idea is the following: Go through each other thread and find
1167 * the latest action that happened before our read. Two cases:
1169 * (1) The action is a write => that write must either occur before
1170 * the write we read from or be the write we read from.
1172 * (2) The action is a read => the write that that action read from
1173 * must occur before the write we read from or be the same write.
1175 * @param curr The current action. Must be a read.
1176 * @param rf The action that curr reads from. Must be a write.
1177 * @return True if modification order edges were added; false otherwise
1179 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
// NOTE(review): lines are elided in this listing (e.g. the 'added' result
// tracking and the per-branch break statements are not shown).
1181 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
// Precondition: only reads impose these constraints.
1184 ASSERT(curr->is_read());
1186 /* Iterate over all threads */
1187 for (i = 0; i < thrd_lists->size(); i++) {
1188 /* Iterate over actions in thread, starting from most recent */
1189 action_list_t *list = &(*thrd_lists)[i];
1190 action_list_t::reverse_iterator rit;
1191 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1192 ModelAction *act = *rit;
1195 * Include at most one act per-thread that "happens
1196 * before" curr. Don't consider reflexively.
1198 if (act->happens_before(curr) && act != curr) {
// Case (1): act is a write hb-before curr => act --mo--> rf.
1199 if (act->is_write()) {
1201 mo_graph->addEdge(act, rf);
// Case (2): act is a read => its source write must be mo-before rf.
1205 const ModelAction *prevreadfrom = act->get_reads_from();
1206 //if the previous read is unresolved, keep going...
1207 if (prevreadfrom == NULL)
// Avoid a reflexive mo edge when both reads saw the same write.
1210 if (rf != prevreadfrom) {
1211 mo_graph->addEdge(prevreadfrom, rf);
1223 /** This method fixes up the modification order when we resolve a
1224 * promise. The basic problem is that actions that occur after the
1225 * read curr could not properly add items to the modification order
1228 * So for each thread, we find the earliest item that happens after
1229 * the read curr. This is the item we have to fix up with additional
1230 * constraints. If that action is write, we add a MO edge between
1231 * the Action rf and that action. If the action is a read, we add a
1232 * MO edge between the Action rf, and whatever the read accessed.
1234 * @param curr is the read ModelAction that we are fixing up MO edges for.
1235 * @param rf is the write ModelAction that curr reads from.
1238 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
// NOTE(review): this listing elides source lines (e.g. the assignment of
// 'lastact' inside the scan loop and the loop-exit breaks are not shown).
1240 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
// Precondition: curr is the read whose promise was just resolved.
1242 ASSERT(curr->is_read());
1244 /* Iterate over all threads */
1245 for (i = 0; i < thrd_lists->size(); i++) {
1246 /* Iterate over actions in thread, starting from most recent */
1247 action_list_t *list = &(*thrd_lists)[i];
1248 action_list_t::reverse_iterator rit;
1249 ModelAction *lastact = NULL;
1251 /* Find last action that happens after curr that is either not curr or a rmw */
1252 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1253 ModelAction *act = *rit;
1254 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1260 /* Include at most one act per-thread that "happens before" curr */
1261 if (lastact != NULL) {
1262 if (lastact==curr) {
1263 //Case 1: The resolved read is a RMW, and we need to make sure
1264 //that the write portion of the RMW mod order after rf
1266 mo_graph->addEdge(rf, lastact);
1267 } else if (lastact->is_read()) {
1268 //Case 2: The resolved read is a normal read and the next
1269 //operation is a read, and we need to make sure the value read
1270 //is mod ordered after rf
1272 const ModelAction *postreadfrom = lastact->get_reads_from();
// Skip unresolved reads and avoid a reflexive edge when both read from rf.
1273 if (postreadfrom != NULL&&rf != postreadfrom)
1274 mo_graph->addEdge(rf, postreadfrom);
1276 //Case 3: The resolved read is a normal read and the next
1277 //operation is a write, and we need to make sure that the
1278 //write is mod ordered after rf
1280 mo_graph->addEdge(rf, lastact);
1288 * Updates the mo_graph with the constraints imposed from the current write.
1290 * Basic idea is the following: Go through each other thread and find
1291 * the latest action that happened before our write. Two cases:
1293 * (1) The action is a write => that write must occur before
1296 * (2) The action is a read => the write that that action read from
1297 * must occur before the current write.
1299 * This method also handles two other issues:
1301 * (I) Sequential Consistency: Making sure that if the current write is
1302 * seq_cst, that it occurs after the previous seq_cst write.
1304 * (II) Sending the write back to non-synchronizing reads.
1306 * @param curr The current action. Must be a write.
1307 * @return True if modification order edges were added; false otherwise
1309 bool ModelChecker::w_modification_order(ModelAction *curr)
// NOTE(review): this listing elides source lines (loop breaks/continues, the
// 'added' return-value bookkeeping, and part of the future-value condition at
// original line 1390 are not shown); verify control flow against the full file.
1311 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
// Precondition: this routine orders writes only.
1314 ASSERT(curr->is_write());
// Issue (I): a seq_cst write is mo-after the previous seq_cst write.
1316 if (curr->is_seqcst()) {
1317 /* We have to at least see the last sequentially consistent write,
1318 so we are initialized. */
1319 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1320 if (last_seq_cst != NULL) {
1321 mo_graph->addEdge(last_seq_cst, curr);
1326 /* Iterate over all threads */
1327 for (i = 0; i < thrd_lists->size(); i++) {
1328 /* Iterate over actions in thread, starting from most recent */
1329 action_list_t *list = &(*thrd_lists)[i];
1330 action_list_t::reverse_iterator rit;
1331 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1332 ModelAction *act = *rit;
1335 * 1) If RMW and it actually read from something, then we
1336 * already have all relevant edges, so just skip to next
1339 * 2) If RMW and it didn't read from anything, we should add
1340 * whatever edge we can get to speed up convergence.
1342 * 3) If normal write, we need to look at earlier actions, so
1343 * continue processing list.
1345 if (curr->is_rmw()) {
1346 if (curr->get_reads_from()!=NULL)
1355 * Include at most one act per-thread that "happens
1358 if (act->happens_before(curr)) {
1360 * Note: if act is RMW, just add edge:
1362 * The following edge should be handled elsewhere:
1363 * readfrom(act) --mo--> act
// Case (1): earlier write hb-before curr => act --mo--> curr.
1365 if (act->is_write())
1366 mo_graph->addEdge(act, curr);
// Case (2): earlier read => its source write is mo-before curr.
1367 else if (act->is_read()) {
1368 //if previous read accessed a null, just keep going
1369 if (act->get_reads_from() == NULL)
1371 mo_graph->addEdge(act->get_reads_from(), curr);
// Issue (II): stash curr as a potential future value for this read.
1375 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1376 !act->same_thread(curr)) {
1377 /* We have an action that:
1378 (1) did not happen before us
1379 (2) is a read and we are a write
1380 (3) cannot synchronize with us
1381 (4) is in a different thread
1383 that read could potentially read from our write. Note that
1384 these checks are overly conservative at this point, we'll
1385 do more checks before actually removing the
// Thin-air constraint filters impossible future reads before stashing.
1389 if (thin_air_constraint_may_allow(curr, act)) {
1391 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1392 struct PendingFutureValue pfv = {curr,act};
1393 futurevalues->push_back(pfv);
1403 /** Arbitrary reads from the future are not allowed. Section 29.3
1404 * part 9 places some constraints. This method checks one result of the
1405 * constraint. Others require compiler support. */
1406 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
// NOTE(review): the return statements following each guard (original lines
// 1408-1409, 1411-1412, 1415, 1418-1421) are elided in this listing.
// The constraint only restricts RMW-to-RMW chains; otherwise it cannot forbid.
1407 if (!writer->is_rmw())
1410 if (!reader->is_rmw())
// Walk the RMW reads-from chain backwards from the writer; if it passes
// through the reader (or an hb-later action of the reader's thread), a
// reader-reads-writer cycle would be out-of-thin-air.
1413 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1414 if (search == reader)
1416 if (search->get_tid() == reader->get_tid() &&
1417 search->happens_before(reader))
1425 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1426 * some constraints. This method checks one of the following constraints (others
1427 * require compiler support):
1429 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1431 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
// NOTE(review): loop breaks and the final return are elided in this listing.
1433 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(reader->get_location());
1435 /* Iterate over all threads */
1436 for (i = 0; i < thrd_lists->size(); i++) {
// Earliest write (per thread) that is hb-after the reader; the "Y" in
// X --hb--> Y --mo--> Z.
1437 const ModelAction *write_after_read = NULL;
1439 /* Iterate over actions in thread, starting from most recent */
1440 action_list_t *list = &(*thrd_lists)[i];
1441 action_list_t::reverse_iterator rit;
1442 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1443 ModelAction *act = *rit;
// Scanning newest-to-oldest: once we pass the reader's hb frontier, stop.
1445 if (!reader->happens_before(act))
1447 else if (act->is_write())
1448 write_after_read = act;
// A later read stands in for the write it observed.
1449 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1450 write_after_read = act->get_reads_from();
// If Y --mo--> Z (writer), then reader reading Z would violate the constraint.
1454 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1461 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1462 * The ModelAction under consideration is expected to be taking part in
1463 * release/acquire synchronization as an object of the "reads from" relation.
1464 * Note that this can only provide release sequence support for RMW chains
1465 * which do not read from the future, as those actions cannot be traced until
1466 * their "promise" is fulfilled. Similarly, we may not even establish the
1467 * presence of a release sequence with certainty, as some modification order
1468 * constraints may be decided further in the future. Thus, this function
1469 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1470 * and a boolean representing certainty.
1472 * @param rf The action that might be part of a release sequence. Must be a
1474 * @param release_heads A pass-by-reference style return parameter. After
1475 * execution of this function, release_heads will contain the heads of all the
1476 * relevant release sequences, if any exists with certainty
1477 * @param pending A pass-by-reference style return parameter which is only used
1478 * when returning false (i.e., uncertain). Returns most information regarding
1479 * an uncertain release sequence, including any write operations that might
1480 * break the sequence.
1481 * @return true, if the ModelChecker is certain that release_heads is complete;
1484 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1485 rel_heads_list_t *release_heads,
1486 struct release_seq *pending) const
// NOTE(review): this listing elides many source lines (the RMW-chain walk's
// loop header, several returns/breaks/continues, and closing braces); the
// structure annotated below is inferred and should be checked against the full file.
1488 /* Only check for release sequences if there are no cycles */
1489 if (mo_graph->checkForCycles())
// rf must be the write side of a reads-from relation.
1493 ASSERT(rf->is_write());
// Walk back through the RMW chain, collecting any release writes as heads.
1495 if (rf->is_release())
1496 release_heads->push_back(rf);
1498 break; /* End of RMW chain */
1500 /** @todo Need to be smarter here... In the linux lock
1501 * example, this will run to the beginning of the program for
1503 /** @todo The way to be smarter here is to keep going until 1
1504 * thread has a release preceded by an acquire and you've seen
1507 /* acq_rel RMW is a sufficient stopping condition */
1508 if (rf->is_acquire() && rf->is_release())
1509 return true; /* complete */
1511 rf = rf->get_reads_from();
1514 /* read from future: need to settle this later */
1516 return false; /* incomplete */
1519 if (rf->is_release())
1520 return true; /* complete */
1522 /* else relaxed write; check modification order for contiguous subsequence
1523 * -> rf must be same thread as release */
1524 int tid = id_to_int(rf->get_tid());
1525 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
1526 action_list_t *list = &(*thrd_lists)[tid];
1527 action_list_t::const_reverse_iterator rit;
1529 /* Find rf in the thread list */
1530 rit = std::find(list->rbegin(), list->rend(), rf);
1531 ASSERT(rit != list->rend());
1533 /* Find the last write/release */
1534 for (; rit != list->rend(); rit++)
1535 if ((*rit)->is_release())
1537 if (rit == list->rend()) {
1538 /* No write-release in this thread */
1539 return true; /* complete */
// Candidate head: the most recent release in rf's own thread.
1541 ModelAction *release = *rit;
1543 ASSERT(rf->same_thread(release));
// Reset the pending record; it is (re)filled with potential breakers below.
1545 pending->writes.clear();
1547 bool certain = true;
1548 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
// Skip rf's own thread; only other threads can interpose writes.
1549 if (id_to_int(rf->get_tid()) == (int)i)
1551 list = &(*thrd_lists)[i];
1553 /* Can we ensure no future writes from this thread may break
1554 * the release seq? */
1555 bool future_ordered = false;
1557 ModelAction *last = get_last_action(int_to_id(i));
1558 Thread *th = get_thread(int_to_id(i));
// A thread is safe if its last action already follows rf, or it can never
// run again (disabled / complete).
1559 if ((last && rf->happens_before(last)) ||
1560 !scheduler->is_enabled(th) ||
1562 future_ordered = true;
1564 ASSERT(!th->is_model_thread() || future_ordered);
1566 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1567 const ModelAction *act = *rit;
1568 /* Reach synchronization -> this thread is complete */
1569 if (act->happens_before(release))
1571 if (rf->happens_before(act)) {
1572 future_ordered = true;
1576 /* Only non-RMW writes can break release sequences */
1577 if (!act->is_write() || act->is_rmw())
1580 /* Check modification order */
1581 if (mo_graph->checkReachable(rf, act)) {
1582 /* rf --mo--> act */
1583 future_ordered = true;
1586 if (mo_graph->checkReachable(act, release))
1587 /* act --mo--> release */
1589 if (mo_graph->checkReachable(release, act) &&
1590 mo_graph->checkReachable(act, rf)) {
1591 /* release --mo-> act --mo--> rf */
1592 return true; /* complete */
1594 /* act may break release sequence */
1595 pending->writes.push_back(act);
1598 if (!future_ordered)
1599 certain = false; /* This thread is uncertain */
// Certain outcome: publish the head and clear the pending record.
1603 release_heads->push_back(release);
1604 pending->writes.clear();
// Uncertain outcome: remember the candidate head for lazy re-checking.
1606 pending->release = release;
1613 * A public interface for getting the release sequence head(s) with which a
1614 * given ModelAction must synchronize. This function only returns a non-empty
1615 * result when it can locate a release sequence head with certainty. Otherwise,
1616 * it may mark the internal state of the ModelChecker so that it will handle
1617 * the release sequence at a later time, causing @a act to update its
1618 * synchronization at some later point in execution.
1619 * @param act The 'acquire' action that may read from a release sequence
1620 * @param release_heads A pass-by-reference return parameter. Will be filled
1621 * with the head(s) of the release sequence(s), if they exist with certainty.
1622 * @see ModelChecker::release_seq_heads
1624 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
// NOTE(review): the else-branch structure around the free (original lines
// 1633-1636) is elided in this listing.
1626 const ModelAction *rf = act->get_reads_from();
// release_seq records live on the snapshotting heap so they roll back with state.
1627 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1628 sequence->acquire = act;
// Uncertain result: defer to resolve_release_sequences() via the pending list.
1630 if (!release_seq_heads(rf, release_heads, sequence)) {
1631 /* add act to 'lazy checking' list */
1632 pending_rel_seqs->push_back(sequence);
// Certain result: the record is no longer needed.
1634 snapshot_free(sequence);
1639 * Attempt to resolve all stashed operations that might synchronize with a
1640 * release sequence for a given location. This implements the "lazy" portion of
1641 * determining whether or not a release sequence was contiguous, since not all
1642 * modification order information is present at the time an action occurs.
1644 * @param location The location/object that should be checked for release
1645 * sequence resolutions. A NULL value means to check all locations.
1646 * @param work_queue The work queue to which to add work items as they are
1648 * @return True if any updates occurred (new synchronization, new mo_graph
1651 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
// NOTE(review): this listing elides source lines (e.g. the iterator advance on
// the skip path, the 'updated' bookkeeping, and this function's return).
1653 bool updated = false;
1654 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1655 while (it != pending_rel_seqs->end()) {
1656 struct release_seq *pending = *it;
1657 ModelAction *act = pending->acquire;
1659 /* Only resolve sequences on the given location, if provided */
1660 if (location && act->get_location() != location) {
1665 const ModelAction *rf = act->get_reads_from();
1666 rel_heads_list_t release_heads;
// Re-run the (possibly now decidable) release-sequence analysis.
1668 complete = release_seq_heads(rf, &release_heads, pending);
1669 for (unsigned int i = 0; i < release_heads.size(); i++) {
1670 if (!act->has_synchronized_with(release_heads[i])) {
1671 if (act->synchronize_with(release_heads[i]))
// Synchronization failed -> flag the execution as badly synchronized.
1674 set_bad_synchronization();
1679 /* Re-check all pending release sequences */
1680 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1681 /* Re-check act for mo_graph edges */
1682 work_queue->push_back(MOEdgeWorkEntry(act));
1684 /* propagate synchronization to later actions */
1685 action_list_t::reverse_iterator rit = action_trace->rbegin();
1686 for (; (*rit) != act; rit++) {
1687 ModelAction *propagate = *rit;
1688 if (act->happens_before(propagate)) {
1689 propagate->synchronize_with(act);
1690 /* Re-check 'propagate' for mo_graph edges */
1691 work_queue->push_back(MOEdgeWorkEntry(propagate));
// Completed sequence: remove it from the lazy list and release its record.
1696 it = pending_rel_seqs->erase(it);
1697 snapshot_free(pending);
// NOTE(review): original lines 1698-1702 are elided here; the fragment below
// (lines 1703-1704) may belong to a sibling function not shown in this listing.
1703 // If we resolved promises or data races, see if we have realized a data race.
1704 if (checkDataRaces()) {
1712 * Performs various bookkeeping operations for the current ModelAction. For
1713 * instance, adds action to the per-object, per-thread action vector and to the
1714 * action trace list of all thread actions.
1716 * @param act is the ModelAction to add.
1718 void ModelChecker::add_action_to_lists(ModelAction *act)
// NOTE(review): closing braces and some lines are elided in this listing.
1720 int tid = id_to_int(act->get_tid());
// Global trace of all actions, in execution order.
1721 action_trace->push_back(act);
// Per-location list.
1723 obj_map->get_safe_ptr(act->get_location())->push_back(act);
// Per-location, per-thread lists; grow the vector to cover this tid.
// NOTE(review): this resizes to priv->next_thread_id while thrd_last_action
// below resizes to get_num_threads() — confirm these are equivalent.
1725 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
1726 if (tid >= (int)vec->size())
1727 vec->resize(priv->next_thread_id);
1728 (*vec)[tid].push_back(act);
// Track the most recent action of each thread.
1730 if ((int)thrd_last_action->size() <= tid)
1731 thrd_last_action->resize(get_num_threads());
1732 (*thrd_last_action)[tid] = act;
// A wait also acts on its mutex: mirror the bookkeeping under the mutex
// location (stored in the action's value).
1734 if (act->is_wait()) {
1735 void *mutex_loc=(void *) act->get_value();
1736 obj_map->get_safe_ptr(mutex_loc)->push_back(act);
1738 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(mutex_loc);
1739 if (tid >= (int)vec->size())
1740 vec->resize(priv->next_thread_id);
1741 (*vec)[tid].push_back(act);
1743 if ((int)thrd_last_action->size() <= tid)
1744 thrd_last_action->resize(get_num_threads());
1745 (*thrd_last_action)[tid] = act;
1750 * @brief Get the last action performed by a particular Thread
1751 * @param tid The thread ID of the Thread in question
1752 * @return The last action in the thread
1754 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
// NOTE(review): the fallback return for an out-of-range tid (original lines
// 1759-1760) is elided in this listing.
1756 int threadid = id_to_int(tid);
1757 if (threadid < (int)thrd_last_action->size())
1758 return (*thrd_last_action)[id_to_int(tid)];
1764 * Gets the last memory_order_seq_cst write (in the total global sequence)
1765 * performed on a particular object (i.e., memory location), not including the
1767 * @param curr The current ModelAction; also denotes the object location to
1769 * @return The last seq_cst write
1771 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
// NOTE(review): the loop body's return and the final NULL return are elided
// in this listing.
1773 void *location = curr->get_location();
1774 action_list_t *list = obj_map->get_safe_ptr(location);
1775 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
// Reverse scan finds the most recent match first; curr itself is excluded.
1776 action_list_t::reverse_iterator rit;
1777 for (rit = list->rbegin(); rit != list->rend(); rit++)
1778 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1784 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1785 * location). This function identifies the mutex according to the current
1786 * action, which is presumed to perform on the same mutex.
1787 * @param curr The current ModelAction; also denotes the object location to
1789 * @return The last unlock operation
1791 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
// NOTE(review): the loop body's return and the final NULL return are elided
// in this listing.
1793 void *location = curr->get_location();
1794 action_list_t *list = obj_map->get_safe_ptr(location);
1795 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// A wait releases the mutex too, so it counts as an unlock here.
1796 action_list_t::reverse_iterator rit;
1797 for (rit = list->rbegin(); rit != list->rend(); rit++)
1798 if ((*rit)->is_unlock() || (*rit)->is_wait())
// Returns the thread's last action, falling back to the THREAD_CREATE action
// that spawned it when the thread has not acted yet.
// NOTE(review): the guard around the fallback and the return (original lines
// 1806, 1808-1809) are elided in this listing.
1803 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1805 ModelAction *parent = get_last_action(tid);
1807 parent = get_thread(tid)->get_creation();
1812 * Returns the clock vector for a given thread.
1813 * @param tid The thread whose clock vector we want
1814 * @return Desired clock vector
1816 ClockVector * ModelChecker::get_cv(thread_id_t tid)
// The thread's clock vector is carried by its parent action (see get_parent_action).
1818 return get_parent_action(tid)->get_cv();
1822 * Resolve a set of Promises with a current write. The set is provided in the
1823 * Node corresponding to @a write.
1824 * @param write The ModelAction that is fulfilling Promises
1825 * @return True if promises were resolved; false otherwise
1827 bool ModelChecker::resolve_promises(ModelAction *write)
// NOTE(review): this listing elides lines (the else-branch advancing
// promise_index, cleanup of the Promise object, and the return of 'resolved').
1829 bool resolved = false;
1830 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
// i indexes the Node's promise bitmap; promise_index tracks the (shrinking)
// promises vector as resolved entries are erased.
1832 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1833 Promise *promise = (*promises)[promise_index];
1834 if (write->get_node()->get_promise(i)) {
1835 ModelAction *read = promise->get_action();
// An RMW reader needs the special RMW edge in the mo graph.
1836 if (read->is_rmw()) {
1837 mo_graph->addRMWEdge(write, read);
1839 read->read_from(write);
1840 //First fix up the modification order for actions that happened
1842 r_modification_order(read, write);
1843 //Next fix up the modification order for actions that happened
1845 post_r_modification_order(read, write);
1846 //Make sure the promise's value matches the write's value
1847 ASSERT(promise->get_value() == write->get_value());
1850 promises->erase(promises->begin() + promise_index);
1851 threads_to_check.push_back(read->get_tid());
1858 //Check whether reading these writes has made threads unable to
// Deferred until after the erase loop so the promises vector is stable.
1861 for(unsigned int i=0;i<threads_to_check.size();i++)
1862 mo_check_promises(threads_to_check[i], write);
1868 * Compute the set of promises that could potentially be satisfied by this
1869 * action. Note that the set computation actually appears in the Node, not in
1871 * @param curr The ModelAction that may satisfy promises
1873 void ModelChecker::compute_promises(ModelAction *curr)
// NOTE(review): part of the condition (original line 1879) and the closing
// braces are elided in this listing.
1875 for (unsigned int i = 0; i < promises->size(); i++) {
1876 Promise *promise = (*promises)[i];
1877 const ModelAction *act = promise->get_action();
// curr can satisfy a promise only if it is unordered w.r.t. the promised
// read (no hb, no possible sync, different thread) and matches its
// location and value.
1878 if (!act->happens_before(curr) &&
1880 !act->could_synchronize_with(curr) &&
1881 !act->same_thread(curr) &&
1882 act->get_location() == curr->get_location() &&
1883 promise->get_value() == curr->get_value()) {
// Record the satisfiable promise in curr's Node (flagging RMW readers).
1884 curr->get_node()->set_promise(i, act->is_rmw());
1889 /** Checks promises in response to change in ClockVector Threads. */
1890 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
// NOTE(review): closing braces are elided in this listing.
1892 for (unsigned int i = 0; i < promises->size(); i++) {
1893 Promise *promise = (*promises)[i];
1894 const ModelAction *act = promise->get_action();
// Only react to a *newly* synchronized promise action: not synchronized
// under the old clock vector, synchronized under the merged one.
1895 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1896 merge_cv->synchronized_since(act)) {
// increment_threads() returning true means no thread can still fulfill it.
1897 if (promise->increment_threads(tid)) {
1898 //Promise has failed
1899 failed_promise = true;
// Re-check all outstanding promises after a thread is disabled; a promise
// that can no longer be fulfilled marks the execution infeasible.
1906 void ModelChecker::check_promises_thread_disabled() {
1907 for (unsigned int i = 0; i < promises->size(); i++) {
1908 Promise *promise = (*promises)[i];
1909 if (promise->check_promise()) {
1910 failed_promise = true;
1916 /** Checks promises in response to addition to modification order for threads.
1918 * pthread is the thread that performed the read that created the promise
1920 * pread is the read that created the promise
1922 * pwrite is either the first write to same location as pread by
1923 * pthread that is sequenced after pread or the value read by the
1924 * first read to the same location as pread by pthread that is
1925 * sequenced after pread.
1927 * 1. If tid=pthread, then we check what other threads are reachable
1928 * through the modification order starting with pwrite. Those threads cannot
1929 * perform a write that will resolve the promise due to modification
1930 * order constraints.
1932 * 2. If the tid is not pthread, we check whether pwrite can reach the
1933 * action write through the modification order. If so, that thread
1934 * cannot perform a future write that will resolve the promise due to
1935 * modification order constraints.
1937 * @param tid The thread that either read from the model action
1938 * write, or actually did the model action write.
1940 * @param write The ModelAction representing the relevant write.
1943 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
// NOTE(review): continues/closing braces are elided in this listing.
1944 void * location = write->get_location();
1945 for (unsigned int i = 0; i < promises->size(); i++) {
1946 Promise *promise = (*promises)[i];
1947 const ModelAction *act = promise->get_action();
1949 //Is this promise on the same location?
1950 if ( act->get_location() != location )
// Case 1 (see header comment): tid is the promising thread itself.
1953 //same thread as the promise
1954 if ( act->get_tid()==tid ) {
1956 //do we have a pwrite for the promise, if not, set it
1957 if (promise->get_write() == NULL ) {
1958 promise->set_write(write);
1959 //The pwrite cannot happen before the promise
1960 if (write->happens_before(act) && (write != act)) {
1961 failed_promise = true;
// mo graph can already prove the promise unfulfillable.
1965 if (mo_graph->checkPromise(write, promise)) {
1966 failed_promise = true;
// Case 2: tid is some other thread reading from / performing 'write'.
1971 //Don't do any lookups twice for the same thread
1972 if (promise->has_sync_thread(tid))
// pwrite --mo--> write means this thread can no longer resolve the promise.
1975 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
1976 if (promise->increment_threads(tid)) {
1977 failed_promise = true;
1985 * Compute the set of writes that may break the current pending release
1986 * sequence. This information is extracted from previous release sequence
1989 * @param curr The current ModelAction. Must be a release sequence fixup
1992 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
// NOTE(review): the early return after the empty check (original line 1995)
// is elided in this listing.
1994 if (pending_rel_seqs->empty())
// Use the most recently deferred release sequence's recorded breaker writes.
1997 struct release_seq *pending = pending_rel_seqs->back();
1998 for (unsigned int i = 0; i < pending->writes.size(); i++) {
1999 const ModelAction *write = pending->writes[i];
2000 curr->get_node()->add_relseq_break(write);
2003 /* NULL means don't break the sequence; just synchronize */
2004 curr->get_node()->add_relseq_break(NULL);
2008 * Build up an initial set of all past writes that this 'read' action may read
2009 * from. This set is determined by the clock vector's "happens before"
2011 * @param curr is the current ModelAction that we are exploring; it must be a
2014 void ModelChecker::build_reads_from_past(ModelAction *curr)
// NOTE(review): this listing elides lines (the 'initialized' assignments, loop
// breaks/continues, debug printing bodies, and closing braces).
2016 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
// Only reads build a may-read-from set.
2018 ASSERT(curr->is_read());
2020 ModelAction *last_seq_cst = NULL;
2022 /* Track whether this object has been initialized */
2023 bool initialized = false;
2025 if (curr->is_seqcst()) {
2026 last_seq_cst = get_last_seq_cst(curr);
2027 /* We have to at least see the last sequentially consistent write,
2028 so we are initialized. */
2029 if (last_seq_cst != NULL)
2033 /* Iterate over all threads */
2034 for (i = 0; i < thrd_lists->size(); i++) {
2035 /* Iterate over actions in thread, starting from most recent */
2036 action_list_t *list = &(*thrd_lists)[i];
2037 action_list_t::reverse_iterator rit;
2038 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2039 ModelAction *act = *rit;
2041 /* Only consider 'write' actions */
2042 if (!act->is_write() || act == curr)
2045 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2046 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
// Sleeping reads are further restricted (see sleep_can_read_from).
2047 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2048 DEBUG("Adding action to may_read_from:\n");
2049 if (DBG_ENABLED()) {
2053 curr->get_node()->add_read_from(act);
2057 /* Include at most one act per-thread that "happens before" curr */
2058 if (act->happens_before(curr)) {
2066 /** @todo Need a more informative way of reporting errors. */
2067 printf("ERROR: may read from uninitialized atomic\n");
2071 if (DBG_ENABLED() || !initialized) {
2072 printf("Reached read action:\n");
2074 printf("Printing may_read_from\n");
2075 curr->get_node()->print_may_read_from();
2076 printf("End printing may_read_from\n");
// Decides whether a read performed while its thread is on the sleep set may
// observe 'write'. Walks the RMW chain backwards looking for a release write
// made while the reader's thread was asleep.
// NOTE(review): the enclosing loop header, returns, and closing braces are
// elided in this listing.
2080 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
// Check the thread's enabled status at the Node *preceding* the write.
2082 Node *prevnode=write->get_node()->get_parent();
2084 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2085 if (write->is_release()&&thread_sleep)
// Non-RMW ends the chain; otherwise continue through the write it read from.
2087 if (!write->is_rmw()) {
2090 if (write->get_reads_from()==NULL)
2092 write=write->get_reads_from();
// Prints every action in the list and a combined hash of the trace, so that
// distinct executions can be told apart at a glance in the output.
2096 static void print_list(action_list_t *list)
2098 action_list_t::iterator it;
2100 printf("---------------------------------------------------------------------\n");
2102 unsigned int hash=0;
// Fold each action's hash into a running trace checksum.
2104 for (it = list->begin(); it != list->end(); it++) {
2106 hash=hash^(hash<<3)^((*it)->hash());
2108 printf("HASH %u\n", hash);
2109 printf("---------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
// Dumps the current execution as a Graphviz .dot file: mo edges (from the
// CycleGraph), rf edges in red, and per-thread sb edges in blue.
// NOTE(review): the buffer declaration, fclose, closing brace, and #endif are
// elided in this listing.
2113 void ModelChecker::dumpGraph(char *filename) {
2115 sprintf(buffer, "%s.dot",filename);
2116 FILE *file=fopen(buffer, "w");
2117 fprintf(file, "digraph %s {\n",filename);
2118 mo_graph->dumpNodes(file);
// Tracks each thread's previous action to draw sequenced-before edges.
2119 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2121 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2122 ModelAction *action=*it;
2123 if (action->is_read()) {
2124 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2125 if (action->get_reads_from()!=NULL)
2126 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2128 if (thread_array[action->get_tid()] != NULL) {
2129 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2132 thread_array[action->get_tid()]=action;
2134 fprintf(file,"}\n");
2135 model_free(thread_array);
// Prints end-of-execution statistics, optionally dumps mo/rf graphs, flags
// infeasible executions, and prints the full action trace.
2140 void ModelChecker::print_summary()
2143 printf("Number of executions: %d\n", num_executions);
2144 printf("Number of feasible executions: %d\n", num_feasible_executions);
2145 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2147 #if SUPPORT_MOD_ORDER_DUMP
// Per-execution graph dumps, numbered so successive executions don't collide.
2149 char buffername[100];
2150 sprintf(buffername, "exec%04u", num_executions);
2151 mo_graph->dumpGraphToFile(buffername);
2152 sprintf(buffername, "graph%04u", num_executions);
2153 dumpGraph(buffername);
2156 if (!isfinalfeasible())
2157 printf("INFEASIBLE EXECUTION!\n");
2158 print_list(action_trace);
2163 * Add a Thread to the system for the first time. Should only be called once
2165 * @param t The Thread to add
2167 void ModelChecker::add_thread(Thread *t)
// Register the thread for ID lookup and hand it to the scheduler.
2169 thread_map->put(id_to_int(t->get_id()), t);
2170 scheduler->add_thread(t);
2174 * Removes a thread from the scheduler.
2175 * @param t The Thread to remove.
2177 void ModelChecker::remove_thread(Thread *t)
// Scheduler bookkeeping only; the thread_map entry is intentionally kept.
2179 scheduler->remove_thread(t);
2183 * @brief Get a Thread reference by its ID
2184 * @param tid The Thread's ID
2185 * @return A Thread reference
2187 Thread * ModelChecker::get_thread(thread_id_t tid) const
// Thread IDs are the keys of thread_map (see add_thread).
2189 return thread_map->get(id_to_int(tid));
2193 * @brief Get a reference to the Thread in which a ModelAction was executed
2194 * @param act The ModelAction
2195 * @return A Thread reference
2197 Thread * ModelChecker::get_thread(ModelAction *act) const
// Convenience overload: resolve via the action's thread ID.
2199 return get_thread(act->get_tid());
2203 * @brief Check if a Thread is currently enabled
2204 * @param t The Thread to check
2205 * @return True if the Thread is currently enabled
2207 bool ModelChecker::is_enabled(Thread *t) const
2209 return scheduler->is_enabled(t);
2213 * @brief Check if a Thread is currently enabled
2214 * @param tid The ID of the Thread to check
2215 * @return True if the Thread is currently enabled
2217 bool ModelChecker::is_enabled(thread_id_t tid) const
2219 return scheduler->is_enabled(tid);
2223 * Switch from a user-context to the "master thread" context (a.k.a. system
2224 * context). This switch is made with the intention of exploring a particular
2225 * model-checking action (described by a ModelAction object). Must be called
2226 * from a user-thread context.
2228 * @param act The current action that will be explored. May be NULL only if
2229 * trace is exiting via an assertion (see ModelChecker::set_assert and
2230 * ModelChecker::has_asserted).
2231 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2233 int ModelChecker::switch_to_master(ModelAction *act)
2236 Thread *old = thread_current();
2237 set_current_action(act);
2238 old->set_state(THREAD_READY);
2239 return Thread::swap(old, &system_context);
2243 * Takes the next step in the execution, if possible.
2244 * @return Returns true (success) if a step was taken and false otherwise.
/* NOTE(review): several interior lines of this function are elided in this
 * excerpt (gaps in the embedded line numbering), including closing braces and
 * the early-return paths; the notes below hedge where control flow is not
 * fully visible. */
2246 bool ModelChecker::take_step() {
/* Resolve the Thread that issued the pending action; NULL when none pending. */
2250 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
/* NOTE(review): curr is dereferenced below but may be NULL here — the full
 * source presumably guards with an `if (curr)` on an elided line; confirm. */
2252 if (curr->get_state() == THREAD_READY) {
2253 ASSERT(priv->current_action);
/* Explore the current action; check_current_action() may nominate the thread
 * that must run next (for replay), stored in priv->nextThread. */
2255 priv->nextThread = check_current_action(priv->current_action);
2256 priv->current_action = NULL;
/* A thread that blocked or finished leaves the scheduler's runnable set. */
2258 if (curr->is_blocked() || curr->is_complete())
2259 scheduler->remove_thread(curr);
/* Ask the scheduler for the next thread, honoring any replay nomination. */
2264 Thread *next = scheduler->next_thread(priv->nextThread);
2266 /* Infeasible -> don't take any more steps */
/* Optional execution-length bound: stop exploring once the number of used
 * sequence numbers exceeds params.bound (0 disables the bound). The early
 * return for this case is on an elided line. */
2270 if (params.bound != 0) {
2271 if (priv->used_sequence_numbers > params.bound) {
2276 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2277 next ? id_to_int(next->get_id()) : -1);
2280 * Launch end-of-execution release sequence fixups only when there are:
2282 * (1) no more user threads to run (or when execution replay chooses
2283 * the 'model_thread')
2284 * (2) pending release sequences
2285 * (3) pending assertions (i.e., data races)
2286 * (4) no pending promises
/* NOTE(review): the warning string below misspells "sequences" as
 * "seuqences" — runtime output, deliberately left unchanged here. */
2288 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2289 isfinalfeasible() && !unrealizedraces.empty()) {
2290 printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2291 pending_rel_seqs->size());
/* Synthesize a model-thread action that resolves lingering release sequences. */
2292 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2293 std::memory_order_seq_cst, NULL, VALUE_NONE,
2295 set_current_action(fixup);
2299 /* next == NULL -> don't take any more steps */
2303 next->set_state(THREAD_RUNNING);
2305 if (next->get_pending() != NULL) {
2306 /* restart a pending action */
/* Re-issue the action the thread was blocked on instead of resuming its
 * user context; it stays READY rather than RUNNING. */
2307 set_current_action(next->get_pending());
2308 next->set_pending(NULL);
2309 next->set_state(THREAD_READY);
2313 /* Return false only if swap fails with an error */
/* Context-switch from the system context into the chosen thread. */
2314 return (Thread::swap(&system_context, next) == 0);
2317 /** Runs the current execution until threre are no more steps to take. */
2318 void ModelChecker::finish_execution() {
2321 while (take_step());