9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
24 struct model_snapshot_members {
/* Action handed over from the user thread; cleared after consumption (see execute_sleep_set) */
25 ModelAction *current_action;
/* Next thread id to hand out; also serves as the count of thread ids in use (see get_num_threads) */
26 unsigned int next_thread_id;
/* Highest sequence number assigned so far in this execution (see get_next_seq_num) */
27 modelclock_t used_sequence_numbers;
/* Latest conflicting action found; the next execution diverges here (see get_next_backtrack) */
29 ModelAction *next_backtrack;
32 /** @brief Constructor */
33 ModelChecker::ModelChecker(struct model_params params) :
34 /* Initialize default scheduler */
36 scheduler(new Scheduler()),
38 num_feasible_executions(0),
40 earliest_diverge(NULL),
41 action_trace(new action_list_t()),
42 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, keyed by memory location */
43 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
/* Threads blocked waiting on a lock / condition variable, keyed by location */
44 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
45 condvar_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
/* Per-location, per-thread action lists (indexed by thread id) */
46 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
/* These vectors use SnapshotAlloc so their contents roll back with the snapshot */
47 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51 node_stack(new NodeStack()),
52 mo_graph(new CycleGraph()),
/* Feasibility flags; all reset in reset_to_initial_state() */
53 failed_promise(false),
54 too_many_reads(false),
56 bad_synchronization(false)
58 /* Allocate this "size" on the snapshotting heap */
59 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
60 /* First thread created will have id INITIAL_THREAD_ID */
61 priv->next_thread_id = INITIAL_THREAD_ID;
63 /* Initialize a model-checker thread, for special ModelActions */
64 model_thread = new Thread(get_next_id());
65 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
68 /** @brief Destructor */
69 ModelChecker::~ModelChecker()
/* Free every Thread registered in thread_map (includes the model thread at id 0) */
71 for (unsigned int i = 0; i < get_num_threads(); i++)
72 delete thread_map->get(i);
77 delete lock_waiters_map;
78 delete condvar_waiters_map;
/* Promises are owned by this class; free each before (presumably) freeing the container */
81 for (unsigned int i = 0; i < promises->size(); i++)
82 delete (*promises)[i];
85 delete pending_rel_seqs;
87 delete thrd_last_action;
94 * Restores user program to initial state and resets all model-checker data
97 void ModelChecker::reset_to_initial_state()
99 DEBUG("+++ Resetting to initial state +++\n");
/* Rewind the exploration tree iterator; the tree itself persists across executions */
100 node_stack->reset_execution();
/* Clear per-execution infeasibility flags (see isfeasibleotherthanRMW) */
101 failed_promise = false;
102 too_many_reads = false;
103 bad_synchronization = false;
/* Roll the snapshotted heap/state back to step 0, i.e. program start */
105 snapshotObject->backTrackBeforeStep(0);
108 /** @return a thread ID for a new Thread */
109 thread_id_t ModelChecker::get_next_id()
/* Post-increment: returns the current id and reserves it */
111 return priv->next_thread_id++;
114 /** @return the number of user threads created during this execution */
115 unsigned int ModelChecker::get_num_threads() const
/* next_thread_id counts ids already handed out, so it equals the thread count */
117 return priv->next_thread_id;
120 /** @return The currently executing Thread. */
121 Thread * ModelChecker::get_current_thread()
/* Delegates to the scheduler, which owns the notion of "current" */
123 return scheduler->get_current_thread();
126 /** @return a sequence number for a new ModelAction */
127 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: sequence numbers start at 1; used_sequence_numbers is snapshotted */
129 return ++priv->used_sequence_numbers;
/** @return the Node at the head of the NodeStack, i.e. the current exploration point */
132 Node * ModelChecker::get_curr_node() {
133 return node_stack->get_head();
137 * @brief Choose the next thread to execute.
139 * This function chooses the next thread that should execute. It can force the
140 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
141 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
142 * The model-checker may have no preference regarding the next thread (i.e.,
143 * when exploring a new execution ordering), in which case this will return
145 * @param curr The current ModelAction. This action might guide the choice of
147 * @return The next thread to run. If the model-checker has no preference, NULL.
149 Thread * ModelChecker::get_next_thread(ModelAction *curr)
154 /* Do not split atomic actions. */
/* NOTE(review): the guard condition for this branch is elided in this copy;
 * presumably it tests curr->is_rmwr() — confirm against the full source. */
156 return thread_current();
157 /* The THREAD_CREATE action points to the created Thread */
158 else if (curr->get_type() == THREAD_CREATE)
159 return (Thread *)curr->get_location();
162 /* Have we completed exploring the preselected path? */
166 /* Else, we are trying to replay an execution */
167 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence point seen since the last feasible execution */
169 if (next == diverge) {
170 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
171 earliest_diverge=diverge;
173 Node *nextnode = next->get_node();
174 Node *prevnode = nextnode->get_parent();
175 scheduler->update_sleep_set(prevnode);
177 /* Reached divergence point */
/* Try each exploration dimension of the next node in priority order:
 * misc index, promise set, read-from choice, future value, release-seq
 * break.  Whichever increments successfully, replay the same thread and
 * discard the now-stale deeper portion of the node stack. */
178 if (nextnode->increment_misc()) {
179 /* The next node will try to satisfy a different misc_index values. */
180 tid = next->get_tid();
181 node_stack->pop_restofstack(2);
182 } else if (nextnode->increment_promise()) {
183 /* The next node will try to satisfy a different set of promises. */
184 tid = next->get_tid();
185 node_stack->pop_restofstack(2);
186 } else if (nextnode->increment_read_from()) {
187 /* The next node will read from a different value. */
188 tid = next->get_tid();
189 node_stack->pop_restofstack(2);
190 } else if (nextnode->increment_future_value()) {
191 /* The next node will try to read from a different future value. */
192 tid = next->get_tid();
193 node_stack->pop_restofstack(2);
194 } else if (nextnode->increment_relseq_break()) {
195 /* The next node will try to resolve a release sequence differently */
196 tid = next->get_tid();
197 node_stack->pop_restofstack(2);
199 /* Make a different thread execute for next step */
200 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
201 tid = prevnode->get_next_backtrack();
202 /* Make sure the backtracked thread isn't sleeping. */
203 node_stack->pop_restofstack(1);
204 if (diverge==earliest_diverge) {
205 earliest_diverge=prevnode->get_action();
208 /* The correct sleep set is in the parent node. */
211 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: keep replaying the recorded thread */
215 tid = next->get_tid();
217 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
218 ASSERT(tid != THREAD_ID_T_NONE);
219 return thread_map->get(id_to_int(tid));
223 * We need to know what the next actions of all threads in the sleep
224 * set will be. This method computes them and stores the actions at
225 * the corresponding thread object's pending action.
228 void ModelChecker::execute_sleep_set() {
229 for(unsigned int i=0;i<get_num_threads();i++) {
230 thread_id_t tid=int_to_id(i);
231 Thread *thr=get_thread(tid);
/* Only sleeping threads whose next action we have not yet captured */
232 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
233 thr->get_pending() == NULL ) {
234 thr->set_state(THREAD_RUNNING);
235 scheduler->next_thread(thr);
/* Context-switch into the thread so it deposits its next ModelAction
 * into priv->current_action, then record it as pending */
236 Thread::swap(&system_context, thr);
237 priv->current_action->set_sleep_flag();
238 thr->set_pending(priv->current_action);
/* The captured action was consumed above; clear the handoff slot */
241 priv->current_action = NULL;
/** Remove from the sleep set any thread whose pending action could
 *  synchronize with @param curr (a thread is only kept asleep while it
 *  cannot make progress visible to others). */
244 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
245 for(unsigned int i=0;i<get_num_threads();i++) {
246 thread_id_t tid=int_to_id(i);
247 Thread *thr=get_thread(tid);
248 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
249 ModelAction *pending_act=thr->get_pending();
/* RMW-read halves are skipped; only complete actions can wake sleepers */
250 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
251 //Remove this thread from sleep set
252 scheduler->remove_sleep(thr);
259 * Check if we are in a deadlock. Should only be called at the end of an
260 * execution, although it should not give false positives in the middle of an
261 * execution (there should be some ENABLED thread).
263 * @return True if program is in a deadlock; false otherwise
265 bool ModelChecker::is_deadlocked() const
267 bool blocking_threads = false;
268 for (unsigned int i = 0; i < get_num_threads(); i++) {
269 Thread *t = get_thread(int_to_id(i));
/* Any thread that is not DISABLED can still run, so no deadlock.
 * NOTE(review): the body of this branch is elided in this copy —
 * presumably it returns false; confirm against the full source. */
270 if (scheduler->is_enabled(t) != THREAD_DISABLED)
/* A disabled user thread with a pending action is blocked on something */
272 else if (!t->is_model_thread() && t->get_pending())
273 blocking_threads = true;
275 return blocking_threads;
279 * Queries the model-checker for more executions to explore and, if one
280 * exists, resets the model-checker state to execute a new execution.
282 * @return If there are more executions to explore, return true. Otherwise,
285 bool ModelChecker::next_execution()
/* NOTE(review): a deadlock-detection guard is elided before this line */
292 printf("ERROR: DEADLOCK\n");
293 if (isfinalfeasible()) {
294 printf("Earliest divergence point since last feasible execution:\n");
295 if (earliest_diverge)
296 earliest_diverge->print();
298 printf("(Not set)\n");
/* Feasible execution completed: reset divergence tracking and count it */
300 earliest_diverge = NULL;
301 num_feasible_executions++;
304 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
305 pending_rel_seqs->size());
/* Dump the trace when the execution was feasible, exceeded the step bound,
 * or debugging output is enabled (dump call elided in this copy) */
308 if (isfinalfeasible() || (params.bound != 0 && priv->used_sequence_numbers > params.bound ) || DBG_ENABLED() ) {
/* No more backtracking points => exploration is complete */
313 if ((diverge = get_next_backtrack()) == NULL)
317 printf("Next execution will diverge at:\n");
321 reset_to_initial_state();
/** Find the most recent action in the per-location history that conflicts
 *  with @param act, for use as a backtracking point.  What counts as a
 *  conflict depends on act's type (synchronization, lock contention,
 *  trylock failure, wait/notify pairing). */
325 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
327 switch (act->get_type()) {
/* NOTE(review): the case labels for this first arm are elided in this copy */
331 /* linear search: from most recent to oldest */
332 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
333 action_list_t::reverse_iterator rit;
334 for (rit = list->rbegin(); rit != list->rend(); rit++) {
335 ModelAction *prev = *rit;
336 if (prev->could_synchronize_with(act))
342 case ATOMIC_TRYLOCK: {
343 /* linear search: from most recent to oldest */
344 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
345 action_list_t::reverse_iterator rit;
346 for (rit = list->rbegin(); rit != list->rend(); rit++) {
347 ModelAction *prev = *rit;
/* A lock/trylock on the same mutex conflicts with this trylock */
348 if (act->is_conflicting_lock(prev))
353 case ATOMIC_UNLOCK: {
354 /* linear search: from most recent to oldest */
355 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
356 action_list_t::reverse_iterator rit;
357 for (rit = list->rbegin(); rit != list->rend(); rit++) {
358 ModelAction *prev = *rit;
/* A failed trylock in another thread might succeed if reordered after us */
359 if (!act->same_thread(prev)&&prev->is_failed_trylock())
365 /* linear search: from most recent to oldest */
366 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
367 action_list_t::reverse_iterator rit;
368 for (rit = list->rbegin(); rit != list->rend(); rit++) {
369 ModelAction *prev = *rit;
370 if (!act->same_thread(prev)&&prev->is_failed_trylock())
/* A notify in another thread also conflicts (this arm handles waits) */
372 if (!act->same_thread(prev)&&prev->is_notify())
378 case ATOMIC_NOTIFY_ALL:
379 case ATOMIC_NOTIFY_ONE: {
380 /* linear search: from most recent to oldest */
381 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
382 action_list_t::reverse_iterator rit;
383 for (rit = list->rbegin(); rit != list->rend(); rit++) {
384 ModelAction *prev = *rit;
/* Notifies conflict with waits from other threads on this condvar */
385 if (!act->same_thread(prev)&&prev->is_wait())
396 /** This method finds backtracking points where we should try to
397 * reorder the parameter ModelAction against.
399 * @param the ModelAction to find backtracking points for.
401 void ModelChecker::set_backtracking(ModelAction *act)
403 Thread *t = get_thread(act);
404 ModelAction * prev = get_last_conflict(act);
/* NOTE(review): a NULL check on prev is presumably elided here */
408 Node * node = prev->get_node()->get_parent();
410 int low_tid, high_tid;
/* If act's thread was enabled at the conflict node, only it need be
 * considered; otherwise (elided else-branch) scan all threads */
411 if (node->is_enabled(t)) {
412 low_tid = id_to_int(act->get_tid());
413 high_tid = low_tid+1;
416 high_tid = get_num_threads();
419 for(int i = low_tid; i < high_tid; i++) {
420 thread_id_t tid = int_to_id(i);
422 /* Make sure this thread can be enabled here. */
423 if (i >= node->get_num_threads())
426 /* Don't backtrack into a point where the thread is disabled or sleeping. */
427 if (node->enabled_status(tid)!=THREAD_ENABLED)
430 /* Check if this has been explored already */
431 if (node->has_been_explored(tid))
434 /* See if fairness allows */
435 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
/* Only skip tid if some enabled thread holds fairness priority here */
437 for(int t=0;t<node->get_num_threads();t++) {
438 thread_id_t tother=int_to_id(t);
439 if (node->is_enabled(tother) && node->has_priority(tother)) {
447 /* Cache the latest backtracking point */
448 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
449 priv->next_backtrack = prev;
451 /* If this is a new backtracking point, mark the tree */
452 if (!node->set_backtrack(tid))
454 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
455 id_to_int(prev->get_tid()),
456 id_to_int(t->get_id()));
465 * Returns last backtracking point. The model checker will explore a different
466 * path for this point in the next execution.
467 * @return The ModelAction at which the next execution should diverge.
469 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: read it, then clear so it is returned only once */
471 ModelAction *next = priv->next_backtrack;
472 priv->next_backtrack = NULL;
477 * Processes a read or rmw model action.
478 * @param curr is the read model action to process.
479 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
480 * @return True if processing this read updates the mo_graph.
482 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
484 uint64_t value = VALUE_NONE;
485 bool updated = false;
/* The Node records which existing write this read is currently set to read from */
487 const ModelAction *reads_from = curr->get_node()->get_read_from();
488 if (reads_from != NULL) {
489 mo_graph->startChanges();
491 value = reads_from->get_value();
492 bool r_status = false;
/* For the second half of a RMW, recency/mo checks were already done on the read half */
494 if (!second_part_of_rmw) {
495 check_recency(curr, reads_from);
496 r_status = r_modification_order(curr, reads_from);
/* If this choice made the execution infeasible and another read-from or
 * future value remains, roll back the mo changes and retry (retry path elided) */
500 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
501 mo_graph->rollbackChanges();
502 too_many_reads = false;
/* Commit this reads-from choice */
506 curr->read_from(reads_from);
507 mo_graph->commitChanges();
508 mo_check_promises(curr->get_tid(), reads_from);
511 } else if (!second_part_of_rmw) {
512 /* Read from future value */
513 value = curr->get_node()->get_future_value();
514 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
515 curr->read_from(NULL);
/* Promise: some later write must eventually supply this value */
516 Promise *valuepromise = new Promise(curr, value, expiration);
517 promises->push_back(valuepromise);
/* Deliver the read value to the user thread */
519 get_thread(curr)->set_return_value(value);
525 * Processes a lock, trylock, or unlock model action. @param curr is
526 * the read model action to process.
528 * The try lock operation checks whether the lock is taken. If not,
529 * it falls to the normal lock operation case. If so, it returns
532 * The lock operation has already been checked that it is enabled, so
533 * it just grabs the lock and synchronizes with the previous unlock.
535 * The unlock operation has to re-enable all of the threads that are
536 * waiting on the lock.
538 * @return True if synchronization was updated; false otherwise
540 bool ModelChecker::process_mutex(ModelAction *curr) {
541 std::mutex *mutex=NULL;
542 struct std::mutex_state *state=NULL;
/* For lock ops the mutex is the action's location; for wait it is the value */
544 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
545 mutex = (std::mutex *)curr->get_location();
546 state = mutex->get_state();
547 } else if(curr->is_wait()) {
548 mutex = (std::mutex *)curr->get_value();
549 state = mutex->get_state();
552 switch (curr->get_type()) {
553 case ATOMIC_TRYLOCK: {
554 bool success = !state->islocked;
555 curr->set_try_lock(success);
/* Failed trylock returns 0 to the caller; successful path falls through to lock */
557 get_thread(curr)->set_return_value(0);
560 get_thread(curr)->set_return_value(1);
562 //otherwise fall into the lock case
/* Detect use of the mutex before its initialization, via clock vectors */
564 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
565 printf("Lock access before initialization\n");
568 state->islocked = true;
569 ModelAction *unlock = get_last_unlock(curr);
570 //synchronize with the previous unlock statement
571 if (unlock != NULL) {
572 curr->synchronize_with(unlock);
577 case ATOMIC_UNLOCK: {
579 state->islocked = false;
580 //wake up the other threads
581 action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
582 //activate all the waiting threads
583 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
584 scheduler->wake(get_thread(*rit));
/* (ATOMIC_WAIT case, label elided) wait releases the mutex first */
591 state->islocked = false;
592 //wake up the other threads
593 action_list_t *waiters = lock_waiters_map->get_safe_ptr((void *) curr->get_value());
594 //activate all the waiting threads
595 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
596 scheduler->wake(get_thread(*rit));
599 //check whether we should go to sleep or not...simulate spurious failures
600 if (curr->get_node()->get_misc()==0) {
601 condvar_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
/* misc == 0 => actually block on the condvar; otherwise spurious wakeup */
603 scheduler->sleep(get_current_thread());
607 case ATOMIC_NOTIFY_ALL: {
608 action_list_t *waiters = condvar_waiters_map->get_safe_ptr(curr->get_location());
609 //activate all the waiting threads
610 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
611 scheduler->wake(get_thread(*rit));
616 case ATOMIC_NOTIFY_ONE: {
617 action_list_t *waiters = condvar_waiters_map->get_safe_ptr(curr->get_location());
/* The node's misc index selects WHICH waiter to wake, so all choices get explored */
618 int wakeupthread=curr->get_node()->get_misc();
619 action_list_t::iterator it = waiters->begin();
620 advance(it, wakeupthread);
621 scheduler->wake(get_thread(*it));
633 * Process a write ModelAction
634 * @param curr The ModelAction to process
635 * @return True if the mo_graph was updated or promises were resolved
637 bool ModelChecker::process_write(ModelAction *curr)
639 bool updated_mod_order = w_modification_order(curr);
640 bool updated_promises = resolve_promises(curr);
/* Only once all promises are resolved can pending future values be re-checked */
642 if (promises->size() == 0) {
643 for (unsigned int i = 0; i < futurevalues->size(); i++) {
644 struct PendingFutureValue pfv = (*futurevalues)[i];
645 //Do more ambitious checks now that mo is more complete
/* If mo allows the read to see this write and the future value is newly
 * added, record the read as the latest backtracking point */
646 if (mo_may_allow(pfv.writer, pfv.act)&&
647 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
648 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
649 priv->next_backtrack = pfv.act;
/* All pending future values have been processed */
651 futurevalues->resize(0);
654 mo_graph->commitChanges();
655 mo_check_promises(curr->get_tid(), curr);
/* Writes have no meaningful return value */
657 get_thread(curr)->set_return_value(VALUE_NONE);
658 return updated_mod_order || updated_promises;
662 * @brief Process the current action for thread-related activity
664 * Performs current-action processing for a THREAD_* ModelAction. Processing
665 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
666 * synchronization, etc. This function is a no-op for non-THREAD actions
667 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
669 * @param curr The current action
670 * @return True if synchronization was updated or a thread completed
672 bool ModelChecker::process_thread_action(ModelAction *curr)
674 bool updated = false;
676 switch (curr->get_type()) {
677 case THREAD_CREATE: {
/* The created Thread is passed via the action's location field */
678 Thread *th = (Thread *)curr->get_location();
679 th->set_creation(curr);
/* (THREAD_JOIN case, label elided) synchronize with the joined thread's last action */
683 Thread *blocking = (Thread *)curr->get_location();
684 ModelAction *act = get_last_action(blocking->get_id());
685 curr->synchronize_with(act);
686 updated = true; /* trigger rel-seq checks */
689 case THREAD_FINISH: {
690 Thread *th = get_thread(curr);
/* Wake every thread blocked joining on this one */
691 while (!th->wait_list_empty()) {
692 ModelAction *act = th->pop_wait_list();
693 scheduler->wake(get_thread(act));
696 updated = true; /* trigger rel-seq checks */
/* (THREAD_START case, label elided) check promises against the new thread's clock */
700 check_promises(curr->get_tid(), NULL, curr->get_cv());
711 * @brief Process the current action for release sequence fixup activity
713 * Performs model-checker release sequence fixups for the current action,
714 * forcing a single pending release sequence to break (with a given, potential
715 * "loose" write) or to complete (i.e., synchronize). If a pending release
716 * sequence forms a complete release sequence, then we must perform the fixup
717 * synchronization, mo_graph additions, etc.
719 * @param curr The current action; must be a release sequence fixup action
720 * @param work_queue The work queue to which to add work items as they are
723 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The node's relseq_break choice: NULL (presumably) means "complete", a
 * write means "break the sequence with that write" */
725 const ModelAction *write = curr->get_node()->get_relseq_break();
726 struct release_seq *sequence = pending_rel_seqs->back();
727 pending_rel_seqs->pop_back();
729 ModelAction *acquire = sequence->acquire;
730 const ModelAction *rf = sequence->rf;
731 const ModelAction *release = sequence->release;
/* A release sequence is headed in the releasing thread; rf must be there too */
735 ASSERT(release->same_thread(rf));
739 * @todo Forcing a synchronization requires that we set
740 * modification order constraints. For instance, we can't allow
741 * a fixup sequence in which two separate read-acquire
742 * operations read from the same sequence, where the first one
743 * synchronizes and the other doesn't. Essentially, we can't
744 * allow any writes to insert themselves between 'release' and
748 /* Must synchronize */
749 if (!acquire->synchronize_with(release)) {
750 set_bad_synchronization();
753 /* Re-check all pending release sequences */
754 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
755 /* Re-check act for mo_graph edges */
756 work_queue->push_back(MOEdgeWorkEntry(acquire));
758 /* propagate synchronization to later actions */
759 action_list_t::reverse_iterator rit = action_trace->rbegin();
/* Walk the trace backwards from the end down to 'acquire' */
760 for (; (*rit) != acquire; rit++) {
761 ModelAction *propagate = *rit;
762 if (acquire->happens_before(propagate)) {
763 propagate->synchronize_with(acquire);
764 /* Re-check 'propagate' for mo_graph edges */
765 work_queue->push_back(MOEdgeWorkEntry(propagate));
769 /* Break release sequence with new edges:
770 * release --mo--> write --mo--> rf */
771 mo_graph->addEdge(release, write);
772 mo_graph->addEdge(write, rf);
775 /* See if we have realized a data race */
776 if (checkDataRaces())
781 * Initialize the current action by performing one or more of the following
782 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
783 * in the NodeStack, manipulating backtracking sets, allocating and
784 * initializing clock vectors, and computing the promises to fulfill.
786 * @param curr The current action, as passed from the user context; may be
787 * freed/invalidated after the execution of this function, with a different
788 * action "returned" its place (pass-by-reference)
789 * @return True if curr is a newly-explored action; false otherwise
791 bool ModelChecker::initialize_curr_action(ModelAction **curr)
793 ModelAction *newcurr;
/* RMW completion/cancellation: merge into the earlier RMWR action */
795 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
796 newcurr = process_rmw(*curr);
799 if (newcurr->is_rmw())
800 compute_promises(newcurr);
/* Otherwise (else branch elided): assign a sequence number and step the NodeStack */
806 (*curr)->set_seq_number(get_next_seq_num());
808 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
/* explore_action returning a pre-existing node means this action was seen before */
810 /* First restore type and order in case of RMW operation */
811 if ((*curr)->is_rmwr())
812 newcurr->copy_typeandorder(*curr);
814 ASSERT((*curr)->get_location() == newcurr->get_location());
815 newcurr->copy_from_new(*curr);
817 /* Discard duplicate ModelAction; use action from NodeStack */
820 /* Always compute new clock vector */
821 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
824 return false; /* Action was explored previously */
828 /* Always compute new clock vector */
829 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
831 * Perform one-time actions when pushing new ModelAction onto
834 if (newcurr->is_write())
835 compute_promises(newcurr);
836 else if (newcurr->is_relseq_fixup())
837 compute_relseq_breakwrites(newcurr);
/* wait: 2 misc choices (block vs. spurious wakeup); notify_one: one per waiter */
838 else if (newcurr->is_wait())
839 newcurr->get_node()->set_misc_max(2);
840 else if (newcurr->is_notify_one()) {
841 newcurr->get_node()->set_misc_max(condvar_waiters_map->get_safe_ptr(newcurr->get_location())->size());
843 return true; /* This was a new ModelAction */
848 * @brief Check whether a model action is enabled.
850 * Checks whether a lock or join operation would be successful (i.e., is the
851 * lock already locked, or is the joined thread already complete). If not, put
852 * the action in a waiter list.
854 * @param curr is the ModelAction to check whether it is enabled.
855 * @return a bool that indicates whether the action is enabled.
857 bool ModelChecker::check_action_enabled(ModelAction *curr) {
858 if (curr->is_lock()) {
859 std::mutex * lock = (std::mutex *)curr->get_location();
860 struct std::mutex_state * state = lock->get_state();
/* Lock held: queue this action so the eventual unlock can wake us */
861 if (state->islocked) {
862 //Stick the action in the appropriate waiting queue
863 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
866 } else if (curr->get_type() == THREAD_JOIN) {
867 Thread *blocking = (Thread *)curr->get_location();
/* Joined thread still running: register on its wait list (woken at THREAD_FINISH) */
868 if (!blocking->is_complete()) {
869 blocking->push_wait_list(curr);
878 * Stores the ModelAction for the current thread action. Call this
879 * immediately before switching from user- to system-context to pass
881 * @param act The ModelAction created by the user-thread action
883 void ModelChecker::set_current_action(ModelAction *act) {
/* priv->current_action is the snapshotted handoff slot read by the system context */
884 priv->current_action = act;
888 * This is the heart of the model checker routine. It performs model-checking
889 * actions corresponding to a given "current action." Among other processes, it
890 * calculates reads-from relationships, updates synchronization clock vectors,
891 * forms a memory_order constraints graph, and handles replay/backtrack
892 * execution when running permutations of previously-observed executions.
894 * @param curr The current action to process
895 * @return The next Thread that must be executed. May be NULL if ModelChecker
896 * makes no choice (e.g., according to replay execution, combining RMW actions,
899 Thread * ModelChecker::check_current_action(ModelAction *curr)
902 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
/* Blocked lock/join: park the action as pending and pick another thread */
904 if (!check_action_enabled(curr)) {
905 /* Make the execution look like we chose to run this action
906 * much later, when a lock/join can succeed */
907 get_current_thread()->set_pending(curr);
908 scheduler->sleep(get_current_thread());
909 return get_next_thread(NULL);
912 bool newly_explored = initialize_curr_action(&curr);
914 wake_up_sleeping_actions(curr);
916 /* Add the action to lists before any other model-checking tasks */
917 if (!second_part_of_rmw)
918 add_action_to_lists(curr);
920 /* Build may_read_from set for newly-created actions */
921 if (newly_explored && curr->is_read())
922 build_reads_from_past(curr);
924 /* Initialize work_queue with the "current action" work */
/* Fixed-point loop: processing one entry may enqueue more work */
925 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
926 while (!work_queue.empty()) {
927 WorkQueueEntry work = work_queue.front();
928 work_queue.pop_front();
931 case WORK_CHECK_CURR_ACTION: {
932 ModelAction *act = work.action;
933 bool update = false; /* update this location's release seq's */
934 bool update_all = false; /* update all release seq's */
/* Dispatch by action category; each processor reports whether it changed state */
936 if (process_thread_action(curr))
939 if (act->is_read() && process_read(act, second_part_of_rmw))
942 if (act->is_write() && process_write(act))
945 if (act->is_mutex_op() && process_mutex(act))
948 if (act->is_relseq_fixup())
949 process_relseq_fixup(curr, &work_queue);
/* Updated state => re-check release sequences (all, or just this location) */
952 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
954 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
957 case WORK_CHECK_RELEASE_SEQ:
958 resolve_release_sequences(work.location, &work_queue);
960 case WORK_CHECK_MO_EDGES: {
961 /** @todo Complete verification of work_queue */
962 ModelAction *act = work.action;
963 bool updated = false;
965 if (act->is_read()) {
966 const ModelAction *rf = act->get_reads_from();
967 if (rf != NULL && r_modification_order(act, rf))
970 if (act->is_write()) {
971 if (w_modification_order(act))
974 mo_graph->commitChanges();
977 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* All work drained: set up backtracking and choose the next thread */
986 check_curr_backtracking(curr);
987 set_backtracking(curr);
988 return get_next_thread(curr);
/** If the current action (or its parent node) still has unexplored
 *  choices and it is later than the cached backtracking point, make it
 *  the new backtracking point. */
991 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
992 Node *currnode = curr->get_node();
993 Node *parnode = currnode->get_parent();
/* Any non-empty choice set (backtrack, misc, read-from, future value,
 * promise, relseq-break) means more to explore at this action */
995 if ((!parnode->backtrack_empty() ||
996 !currnode->misc_empty() ||
997 !currnode->read_from_empty() ||
998 !currnode->future_value_empty() ||
999 !currnode->promise_empty() ||
1000 !currnode->relseq_break_empty())
1001 && (!priv->next_backtrack ||
1002 *curr > *priv->next_backtrack)) {
1003 priv->next_backtrack = curr;
/** @return presumably true if any outstanding promise has expired — its
 *  expiration clock is behind the current sequence number (return
 *  statements elided in this copy). */
1007 bool ModelChecker::promises_expired() {
1008 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1009 Promise *promise = (*promises)[promise_index];
1010 if (promise->get_expiration()<priv->used_sequence_numbers) {
1017 /** @return whether the current partial trace must be a prefix of a
1018 * feasible trace. */
1019 bool ModelChecker::isfeasibleprefix() {
/* A prefix is certainly feasible only when nothing is still unresolved:
 * no outstanding promises, no pending release sequences, and feasible now */
1020 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1023 /** @return whether the current partial trace is feasible. */
1024 bool ModelChecker::isfeasible() {
/* Debug-only diagnostic; the actual decision is the return expression below */
1025 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1026 DEBUG("Infeasible: RMW violation\n");
1028 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1031 /** @return whether the current partial trace is feasible other than
1032 * multiple RMW reading from the same store. */
1033 bool ModelChecker::isfeasibleotherthanRMW() {
/* The DBG block only reports WHICH condition failed; it decides nothing */
1034 if (DBG_ENABLED()) {
1035 if (mo_graph->checkForCycles())
1036 DEBUG("Infeasible: modification order cycles\n");
1038 DEBUG("Infeasible: failed promise\n");
1040 DEBUG("Infeasible: too many reads\n");
1041 if (bad_synchronization)
1042 DEBUG("Infeasible: bad synchronization ordering\n");
1043 if (promises_expired())
1044 DEBUG("Infeasible: promises expired\n");
/* Feasible iff none of the five infeasibility conditions hold */
1046 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1049 /** Returns whether the current completed trace is feasible. */
1050 bool ModelChecker::isfinalfeasible() {
1051 if (DBG_ENABLED() && promises->size() != 0)
/* NOTE(review): "unrevolved" is a typo for "unresolved" in this debug string;
 * left unchanged here since it is runtime message text */
1052 DEBUG("Infeasible: unrevolved promises\n");
/* A completed trace must additionally have resolved every promise */
1054 return isfeasible() && promises->size() == 0;
1057 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1058 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
/* The pending RMWR is this thread's last action; merge act into it */
1059 ModelAction *lastread = get_last_action(act->get_tid());
1060 lastread->process_rmw(act);
/* A completed RMW must be mo-ordered immediately after the write it read from */
1061 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1062 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1063 mo_graph->commitChanges();
1069 * Checks whether a thread has read from the same write for too many times
1070 * without seeing the effects of a later write.
1073 * 1) there must a different write that we could read from that would satisfy the modification order,
1074 * 2) we must have read from the same value in excess of maxreads times, and
1075 * 3) that other write must have been in the reads_from set for maxreads times.
1077 * If so, we decide that the execution is no longer feasible.
1079 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
/* maxreads == 0 disables the whole check */
1080 if (params.maxreads != 0) {
/* With at most one possible write to read from, there is no alternative */
1082 if (curr->get_node()->get_read_from_size() <= 1)
1084 //Must make sure that execution is currently feasible... We could
1085 //accidentally clear by rolling back
1088 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1089 int tid = id_to_int(curr->get_tid());
/* No per-thread history recorded for this thread at this location yet */
1092 if ((int)thrd_lists->size() <= tid)
1094 action_list_t *list = &(*thrd_lists)[tid];
1096 action_list_t::reverse_iterator rit = list->rbegin();
1097 /* Skip past curr */
1098 for (; (*rit) != curr; rit++)
1100 /* go past curr now */
/* Remember where curr's predecessor is; the second pass below restarts here */
1103 action_list_t::reverse_iterator ritcopy = rit;
1104 //See if we have enough reads from the same value
/* Pass 1: require maxreads consecutive reads from the same write rf */
1106 for (; count < params.maxreads; rit++,count++) {
1107 if (rit==list->rend())
1109 ModelAction *act = *rit;
1110 if (!act->is_read())
1113 if (act->get_reads_from() != rf)
1115 if (act->get_node()->get_read_from_size() <= 1)
/* Pass 2: look for a different write that is a feasible alternative */
1118 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1120 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1122 //Need a different write
1126 /* Test to see whether this is a feasible write to read from*/
/* Trial-add the mo edges, record feasibility, then roll back */
1127 mo_graph->startChanges();
1128 r_modification_order(curr, write);
1129 bool feasiblereadfrom = isfeasible();
1130 mo_graph->rollbackChanges();
1132 if (!feasiblereadfrom)
1136 bool feasiblewrite = true;
1137 //now we need to see if this write works for everyone
/* The alternative write must have been available to all counted reads */
1139 for (int loop = count; loop>0; loop--,rit++) {
1140 ModelAction *act=*rit;
1141 bool foundvalue = false;
1142 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1143 if (act->get_node()->get_read_from_at(j)==write) {
1149 feasiblewrite = false;
/* All three conditions met: declare the execution infeasible */
1153 if (feasiblewrite) {
1154 too_many_reads = true;
1162 * Updates the mo_graph with the constraints imposed from the current
1165 * Basic idea is the following: Go through each other thread and find
1166 * the latest action that happened before our read. Two cases:
1168 * (1) The action is a write => that write must either occur before
1169 * the write we read from or be the write we read from.
1171 * (2) The action is a read => the write that that action read from
1172 * must occur before the write we read from or be the same write.
1174 * @param curr The current action. Must be a read.
1175 * @param rf The action that curr reads from. Must be a write.
1176 * @return True if modification order edges were added; false otherwise
1178 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
/* Actions at curr's location, grouped by the thread that performed them */
1180 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1183 ASSERT(curr->is_read());
1185 /* Iterate over all threads */
1186 for (i = 0; i < thrd_lists->size(); i++) {
1187 /* Iterate over actions in thread, starting from most recent */
1188 action_list_t *list = &(*thrd_lists)[i];
1189 action_list_t::reverse_iterator rit;
1190 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1191 ModelAction *act = *rit;
1194 * Include at most one act per-thread that "happens
1195 * before" curr. Don't consider reflexively.
1197 if (act->happens_before(curr) && act != curr) {
/* Case (1): a prior write must be mo-ordered before rf */
1198 if (act->is_write()) {
1200 mo_graph->addEdge(act, rf);
/* Case (2): the write a prior read saw must be mo-ordered before rf */
1204 const ModelAction *prevreadfrom = act->get_reads_from();
1205 //if the previous read is unresolved, keep going...
1206 if (prevreadfrom == NULL)
/* Avoid a self-edge when both reads saw the same write */
1209 if (rf != prevreadfrom) {
1210 mo_graph->addEdge(prevreadfrom, rf);
1222 /** This method fixes up the modification order when we resolve a
1223 * promises. The basic problem is that actions that occur after the
1224 * read curr could not properly add items to the modification order
1227 * So for each thread, we find the earliest item that happens after
1228 * the read curr. This is the item we have to fix up with additional
1229 * constraints. If that action is write, we add a MO edge between
1230 * the Action rf and that action. If the action is a read, we add a
1231 * MO edge between the Action rf, and whatever the read accessed.
1233 * @param curr is the read ModelAction that we are fixing up MO edges for.
1234 * @param rf is the write ModelAction that curr reads from.
1237 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
/* Actions at curr's location, grouped by thread */
1239 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1241 ASSERT(curr->is_read());
1243 /* Iterate over all threads */
1244 for (i = 0; i < thrd_lists->size(); i++) {
1245 /* Iterate over actions in thread, starting from most recent */
1246 action_list_t *list = &(*thrd_lists)[i];
1247 action_list_t::reverse_iterator rit;
1248 ModelAction *lastact = NULL;
1250 /* Find last action that happens after curr that is either not curr or a rmw */
1251 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1252 ModelAction *act = *rit;
/* Walking newest-to-oldest, so the final match is the EARLIEST
 * action in this thread ordered after curr */
1253 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1259 /* Include at most one act per-thread that "happens before" curr */
1260 if (lastact != NULL) {
1261 if (lastact==curr) {
1262 //Case 1: The resolved read is a RMW, and we need to make sure
1263 //that the write portion of the RMW mod order after rf
1265 mo_graph->addEdge(rf, lastact);
1266 } else if (lastact->is_read()) {
1267 //Case 2: The resolved read is a normal read and the next
1268 //operation is a read, and we need to make sure the value read
1269 //is mod ordered after rf
1271 const ModelAction *postreadfrom = lastact->get_reads_from();
/* Skip unresolved reads; avoid a self-edge when both saw rf */
1272 if (postreadfrom != NULL&&rf != postreadfrom)
1273 mo_graph->addEdge(rf, postreadfrom);
1275 //Case 3: The resolved read is a normal read and the next
1276 //operation is a write, and we need to make sure that the
1277 //write is mod ordered after rf
1279 mo_graph->addEdge(rf, lastact);
1287 * Updates the mo_graph with the constraints imposed from the current write.
1289 * Basic idea is the following: Go through each other thread and find
1290 * the latest action that happened before our write. Two cases:
1292 * (1) The action is a write => that write must occur before
1295 * (2) The action is a read => the write that that action read from
1296 * must occur before the current write.
1298 * This method also handles two other issues:
1300 * (I) Sequential Consistency: Making sure that if the current write is
1301 * seq_cst, that it occurs after the previous seq_cst write.
1303 * (II) Sending the write back to non-synchronizing reads.
1305 * @param curr The current action. Must be a write.
1306 * @return True if modification order edges were added; false otherwise
1308 bool ModelChecker::w_modification_order(ModelAction *curr)
/* Actions at curr's location, grouped by thread */
1310 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1313 ASSERT(curr->is_write());
1315 if (curr->is_seqcst()) {
1316 /* We have to at least see the last sequentially consistent write,
1317 so we are initialized. */
1318 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1319 if (last_seq_cst != NULL) {
/* Constraint (I): seq_cst writes form a total order */
1320 mo_graph->addEdge(last_seq_cst, curr);
1325 /* Iterate over all threads */
1326 for (i = 0; i < thrd_lists->size(); i++) {
1327 /* Iterate over actions in thread, starting from most recent */
1328 action_list_t *list = &(*thrd_lists)[i];
1329 action_list_t::reverse_iterator rit;
1330 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1331 ModelAction *act = *rit;
1334 * 1) If RMW and it actually read from something, then we
1335 * already have all relevant edges, so just skip to next
1338 * 2) If RMW and it didn't read from anything, we should
1339 * whatever edge we can get to speed up convergence.
1341 * 3) If normal write, we need to look at earlier actions, so
1342 * continue processing list.
1344 if (curr->is_rmw()) {
1345 if (curr->get_reads_from()!=NULL)
1354 * Include at most one act per-thread that "happens
1357 if (act->happens_before(curr)) {
1359 * Note: if act is RMW, just add edge:
1361 * The following edge should be handled elsewhere:
1362 * readfrom(act) --mo--> act
/* Earlier write is mo-ordered before curr */
1364 if (act->is_write())
1365 mo_graph->addEdge(act, curr);
1366 else if (act->is_read()) {
1367 //if previous read accessed a null, just keep going
1368 if (act->get_reads_from() == NULL)
/* The write an earlier read saw must precede curr in mo */
1370 mo_graph->addEdge(act->get_reads_from(), curr);
1374 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1375 !act->same_thread(curr)) {
1376 /* We have an action that:
1377 (1) did not happen before us
1378 (2) is a read and we are a write
1379 (3) cannot synchronize with us
1380 (4) is in a different thread
1382 that read could potentially read from our write. Note that
1383 these checks are overly conservative at this point, we'll
1384 do more checks before actually removing the
/* Constraint (II): stash a potential future value for this read,
 * subject to the thin-air check */
1388 if (thin_air_constraint_may_allow(curr, act)) {
1390 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1391 struct PendingFutureValue pfv = {curr,act};
1392 futurevalues->push_back(pfv);
1402 /** Arbitrary reads from the future are not allowed. Section 29.3
1403 * part 9 places some constraints. This method checks one result of those
1404 * constraints. Others require compiler support. */
1405 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1406 if (!writer->is_rmw())
1409 if (!reader->is_rmw())
1412 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1413 if (search == reader)
1415 if (search->get_tid() == reader->get_tid() &&
1416 search->happens_before(reader))
1424 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1425 * some constraints. This method checks one of the following constraints (others
1426 * require compiler support):
1428 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1430 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
/* Actions at the reader's location, grouped by thread */
1432 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(reader->get_location());
1434 /* Iterate over all threads */
1435 for (i = 0; i < thrd_lists->size(); i++) {
/* Candidate "Z" in X --hb-> Y --mo-> Z: a write (or a write some later
 * read saw) ordered after the reader in this thread */
1436 const ModelAction *write_after_read = NULL;
1438 /* Iterate over actions in thread, starting from most recent */
1439 action_list_t *list = &(*thrd_lists)[i];
1440 action_list_t::reverse_iterator rit;
1441 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1442 ModelAction *act = *rit;
/* Only actions the reader happens-before are relevant */
1444 if (!reader->happens_before(act))
1446 else if (act->is_write())
1447 write_after_read = act;
1448 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1449 write_after_read = act->get_reads_from();
/* If the writer is mo-after such a Z, the read would violate
 * X --hb-> Y --mo-> Z and must be disallowed */
1453 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1460 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1461 * The ModelAction under consideration is expected to be taking part in
1462 * release/acquire synchronization as an object of the "reads from" relation.
1463 * Note that this can only provide release sequence support for RMW chains
1464 * which do not read from the future, as those actions cannot be traced until
1465 * their "promise" is fulfilled. Similarly, we may not even establish the
1466 * presence of a release sequence with certainty, as some modification order
1467 * constraints may be decided further in the future. Thus, this function
1468 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1469 * and a boolean representing certainty.
1471 * @param rf The action that might be part of a release sequence. Must be a
1473 * @param release_heads A pass-by-reference style return parameter. After
1474 * execution of this function, release_heads will contain the heads of all the
1475 * relevant release sequences, if any exists with certainty
1476 * @param pending A pass-by-reference style return parameter which is only used
1477 * when returning false (i.e., uncertain). Returns most information regarding
1478 * an uncertain release sequence, including any write operations that might
1479 * break the sequence.
1480 * @return true, if the ModelChecker is certain that release_heads is complete;
1483 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1484 rel_heads_list_t *release_heads,
1485 struct release_seq *pending) const
1487 /* Only check for release sequences if there are no cycles */
1488 if (mo_graph->checkForCycles())
1492 ASSERT(rf->is_write());
/* Walk back through the RMW chain, collecting each release write as a
 * potential release-sequence head */
1494 if (rf->is_release())
1495 release_heads->push_back(rf);
1497 break; /* End of RMW chain */
1499 /** @todo Need to be smarter here... In the linux lock
1500 * example, this will run to the beginning of the program for
1502 /** @todo The way to be smarter here is to keep going until 1
1503 * thread has a release preceded by an acquire and you've seen
1506 /* acq_rel RMW is a sufficient stopping condition */
1507 if (rf->is_acquire() && rf->is_release())
1508 return true; /* complete */
1510 rf = rf->get_reads_from();
1513 /* read from future: need to settle this later */
1515 return false; /* incomplete */
1518 if (rf->is_release())
1519 return true; /* complete */
1521 /* else relaxed write; check modification order for contiguous subsequence
1522 * -> rf must be same thread as release */
1523 int tid = id_to_int(rf->get_tid());
1524 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
1525 action_list_t *list = &(*thrd_lists)[tid];
1526 action_list_t::const_reverse_iterator rit;
1528 /* Find rf in the thread list */
1529 rit = std::find(list->rbegin(), list->rend(), rf);
1530 ASSERT(rit != list->rend());
1532 /* Find the last write/release */
1533 for (; rit != list->rend(); rit++)
1534 if ((*rit)->is_release())
1536 if (rit == list->rend()) {
1537 /* No write-release in this thread */
1538 return true; /* complete */
/* Candidate head: the most recent release in rf's thread before rf */
1540 ModelAction *release = *rit;
1542 ASSERT(rf->same_thread(release));
1544 pending->writes.clear();
/* Now verify no other thread's write can slot between 'release' and 'rf'
 * in modification order, which would break the sequence */
1546 bool certain = true;
1547 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
/* rf's own thread cannot break its own release sequence */
1548 if (id_to_int(rf->get_tid()) == (int)i)
1550 list = &(*thrd_lists)[i];
1552 /* Can we ensure no future writes from this thread may break
1553 * the release seq? */
1554 bool future_ordered = false;
1556 ModelAction *last = get_last_action(int_to_id(i));
1557 Thread *th = get_thread(int_to_id(i));
1558 if ((last && rf->happens_before(last)) ||
1559 !scheduler->is_enabled(th) ||
1561 future_ordered = true;
1563 ASSERT(!th->is_model_thread() || future_ordered);
1565 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1566 const ModelAction *act = *rit;
1567 /* Reach synchronization -> this thread is complete */
1568 if (act->happens_before(release))
1570 if (rf->happens_before(act)) {
1571 future_ordered = true;
1575 /* Only non-RMW writes can break release sequences */
1576 if (!act->is_write() || act->is_rmw())
1579 /* Check modification order */
1580 if (mo_graph->checkReachable(rf, act)) {
1581 /* rf --mo--> act */
1582 future_ordered = true;
1585 if (mo_graph->checkReachable(act, release))
1586 /* act --mo--> release */
1588 if (mo_graph->checkReachable(release, act) &&
1589 mo_graph->checkReachable(act, rf)) {
1590 /* release --mo-> act --mo--> rf */
1591 return true; /* complete */
1593 /* act may break release sequence */
1594 pending->writes.push_back(act);
1597 if (!future_ordered)
1598 certain = false; /* This thread is uncertain */
/* Certain: report the head and clear the pending record; otherwise
 * record the release for later lazy re-checking */
1602 release_heads->push_back(release);
1603 pending->writes.clear();
1605 pending->release = release;
1612 * A public interface for getting the release sequence head(s) with which a
1613 * given ModelAction must synchronize. This function only returns a non-empty
1614 * result when it can locate a release sequence head with certainty. Otherwise,
1615 * it may mark the internal state of the ModelChecker so that it will handle
1616 * the release sequence at a later time, causing @a act to update its
1617 * synchronization at some later point in execution.
1618 * @param act The 'acquire' action that may read from a release sequence
1619 * @param release_heads A pass-by-reference return parameter. Will be filled
1620 * with the head(s) of the release sequence(s), if they exists with certainty.
1621 * @see ModelChecker::release_seq_heads
1623 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1625 const ModelAction *rf = act->get_reads_from();
1626 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1627 sequence->acquire = act;
1629 if (!release_seq_heads(rf, release_heads, sequence)) {
1630 /* add act to 'lazy checking' list */
1631 pending_rel_seqs->push_back(sequence);
1633 snapshot_free(sequence);
1638 * Attempt to resolve all stashed operations that might synchronize with a
1639 * release sequence for a given location. This implements the "lazy" portion of
1640 * determining whether or not a release sequence was contiguous, since not all
1641 * modification order information is present at the time an action occurs.
1643 * @param location The location/object that should be checked for release
1644 * sequence resolutions. A NULL value means to check all locations.
1645 * @param work_queue The work queue to which to add work items as they are
1647 * @return True if any updates occurred (new synchronization, new mo_graph
1650 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1652 bool updated = false;
/* Iterate with an explicit iterator so resolved entries can be erased
 * in place (erase returns the next valid iterator) */
1653 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1654 while (it != pending_rel_seqs->end()) {
1655 struct release_seq *pending = *it;
1656 ModelAction *act = pending->acquire;
1658 /* Only resolve sequences on the given location, if provided */
1659 if (location && act->get_location() != location) {
1664 const ModelAction *rf = act->get_reads_from();
1665 rel_heads_list_t release_heads;
/* Re-run the certainty check now that mo_graph has more edges */
1667 complete = release_seq_heads(rf, &release_heads, pending);
1668 for (unsigned int i = 0; i < release_heads.size(); i++) {
1669 if (!act->has_synchronized_with(release_heads[i])) {
1670 if (act->synchronize_with(release_heads[i]))
/* Synchronization failed: flag the execution as broken */
1673 set_bad_synchronization();
1678 /* Re-check all pending release sequences */
1679 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1680 /* Re-check act for mo_graph edges */
1681 work_queue->push_back(MOEdgeWorkEntry(act));
1683 /* propagate synchronization to later actions */
1684 action_list_t::reverse_iterator rit = action_trace->rbegin();
1685 for (; (*rit) != act; rit++) {
1686 ModelAction *propagate = *rit;
1687 if (act->happens_before(propagate)) {
1688 propagate->synchronize_with(act);
1689 /* Re-check 'propagate' for mo_graph edges */
1690 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Resolved with certainty: drop the pending record */
1695 it = pending_rel_seqs->erase(it);
1696 snapshot_free(pending);
1702 // If we resolved promises or data races, see if we have realized a data race.
1703 if (checkDataRaces()) {
1711 * Performs various bookkeeping operations for the current ModelAction. For
1712 * instance, adds action to the per-object, per-thread action vector and to the
1713 * action trace list of all thread actions.
1715 * @param act is the ModelAction to add.
1717 void ModelChecker::add_action_to_lists(ModelAction *act)
1719 int tid = id_to_int(act->get_tid());
/* Global trace of all actions */
1720 action_trace->push_back(act);
/* Per-object list */
1722 obj_map->get_safe_ptr(act->get_location())->push_back(act);
/* Per-object, per-thread list; grow the vector to cover this tid */
1724 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
1725 if (tid >= (int)vec->size())
1726 vec->resize(priv->next_thread_id);
1727 (*vec)[tid].push_back(act);
/* Track the most recent action of each thread */
1729 if ((int)thrd_last_action->size() <= tid)
1730 thrd_last_action->resize(get_num_threads());
1731 (*thrd_last_action)[tid] = act;
/* A wait also operates on its mutex (stored in the action's value), so
 * repeat the same bookkeeping for the mutex location */
1733 if (act->is_wait()) {
1734 void *mutex_loc=(void *) act->get_value();
1735 obj_map->get_safe_ptr(mutex_loc)->push_back(act);
1737 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(mutex_loc);
1738 if (tid >= (int)vec->size())
1739 vec->resize(priv->next_thread_id);
1740 (*vec)[tid].push_back(act);
1742 if ((int)thrd_last_action->size() <= tid)
1743 thrd_last_action->resize(get_num_threads());
1744 (*thrd_last_action)[tid] = act;
1749 * @brief Get the last action performed by a particular Thread
1750 * @param tid The thread ID of the Thread in question
1751 * @return The last action in the thread
1753 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1755 int threadid = id_to_int(tid);
1756 if (threadid < (int)thrd_last_action->size())
1757 return (*thrd_last_action)[id_to_int(tid)];
1763 * Gets the last memory_order_seq_cst write (in the total global sequence)
1764 * performed on a particular object (i.e., memory location), not including the
1766 * @param curr The current ModelAction; also denotes the object location to
1768 * @return The last seq_cst write
1770 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1772 void *location = curr->get_location();
1773 action_list_t *list = obj_map->get_safe_ptr(location);
1774 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1775 action_list_t::reverse_iterator rit;
1776 for (rit = list->rbegin(); rit != list->rend(); rit++)
1777 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1783 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1784 * location). This function identifies the mutex according to the current
1785 * action, which is presumed to perform on the same mutex.
1786 * @param curr The current ModelAction; also denotes the object location to
1788 * @return The last unlock operation
1790 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1792 void *location = curr->get_location();
1793 action_list_t *list = obj_map->get_safe_ptr(location);
1794 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1795 action_list_t::reverse_iterator rit;
1796 for (rit = list->rbegin(); rit != list->rend(); rit++)
1797 if ((*rit)->is_unlock() || (*rit)->is_wait())
1802 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1804 ModelAction *parent = get_last_action(tid);
1806 parent = get_thread(tid)->get_creation();
1811 * Returns the clock vector for a given thread.
1812 * @param tid The thread whose clock vector we want
1813 * @return Desired clock vector
1815 ClockVector * ModelChecker::get_cv(thread_id_t tid)
/* The thread's current clock vector lives on its parent action */
1817 return get_parent_action(tid)->get_cv();
1821 * Resolve a set of Promises with a current write. The set is provided in the
1822 * Node corresponding to @a write.
1823 * @param write The ModelAction that is fulfilling Promises
1824 * @return True if promises were resolved; false otherwise
1826 bool ModelChecker::resolve_promises(ModelAction *write)
1828 bool resolved = false;
/* Threads whose promises were just satisfied; re-checked below */
1829 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
/* i indexes the Node's promise bitmap; promise_index tracks the (shrinking)
 * promises vector as entries are erased */
1831 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1832 Promise *promise = (*promises)[promise_index];
1833 if (write->get_node()->get_promise(i)) {
1834 ModelAction *read = promise->get_action();
/* An RMW reader needs the special RMW edge in the mo graph */
1835 if (read->is_rmw()) {
1836 mo_graph->addRMWEdge(write, read);
1838 read->read_from(write);
1839 //First fix up the modification order for actions that happened
1841 r_modification_order(read, write);
1842 //Next fix up the modification order for actions that happened
1844 post_r_modification_order(read, write);
1845 //Make sure the promise's value matches the write's value
1846 ASSERT(promise->get_value() == write->get_value());
1849 promises->erase(promises->begin() + promise_index);
1850 threads_to_check.push_back(read->get_tid());
1857 //Check whether reading these writes has made threads unable to
1860 for(unsigned int i=0;i<threads_to_check.size();i++)
1861 mo_check_promises(threads_to_check[i], write);
1867 * Compute the set of promises that could potentially be satisfied by this
1868 * action. Note that the set computation actually appears in the Node, not in
1870 * @param curr The ModelAction that may satisfy promises
1872 void ModelChecker::compute_promises(ModelAction *curr)
1874 for (unsigned int i = 0; i < promises->size(); i++) {
1875 Promise *promise = (*promises)[i];
1876 const ModelAction *act = promise->get_action();
/* curr can satisfy a promise only if it is not already ordered with the
 * promised read, touches the same location, and writes the promised value */
1877 if (!act->happens_before(curr) &&
1879 !act->could_synchronize_with(curr) &&
1880 !act->same_thread(curr) &&
1881 act->get_location() == curr->get_location() &&
1882 promise->get_value() == curr->get_value()) {
/* Record satisfiability in the Node; RMW readers are flagged */
1883 curr->get_node()->set_promise(i, act->is_rmw());
1888 /** Checks promises in response to change in ClockVector Threads. */
1889 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1891 for (unsigned int i = 0; i < promises->size(); i++) {
1892 Promise *promise = (*promises)[i];
1893 const ModelAction *act = promise->get_action();
/* Thread tid just became synchronized with the promised read: it can no
 * longer supply a write for that promise */
1894 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1895 merge_cv->synchronized_since(act)) {
1896 if (promise->increment_threads(tid)) {
1897 //Promise has failed
1898 failed_promise = true;
1905 void ModelChecker::check_promises_thread_disabled() {
1906 for (unsigned int i = 0; i < promises->size(); i++) {
1907 Promise *promise = (*promises)[i];
1908 if (promise->check_promise()) {
1909 failed_promise = true;
1915 /** Checks promises in response to addition to modification order for threads.
1917 * pthread is the thread that performed the read that created the promise
1919 * pread is the read that created the promise
1921 * pwrite is either the first write to same location as pread by
1922 * pthread that is sequenced after pread or the value read by the
1923 * first read to the same location as pread by pthread that is
1924 * sequenced after pread..
1926 * 1. If tid=pthread, then we check what other threads are reachable
1927 * through the modification order starting with pwrite. Those threads cannot
1928 * perform a write that will resolve the promise due to modification
1929 * order constraints.
1931 * 2. If the tid is not pthread, we check whether pwrite can reach the
1932 * action write through the modification order. If so, that thread
1933 * cannot perform a future write that will resolve the promise due to
1934 * modification order constraints.
1936 * @param tid The thread that either read from the model action
1937 * write, or actually did the model action write.
1939 * @param write The ModelAction representing the relevant write.
1942 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1943 void * location = write->get_location();
1944 for (unsigned int i = 0; i < promises->size(); i++) {
1945 Promise *promise = (*promises)[i];
1946 const ModelAction *act = promise->get_action();
1948 //Is this promise on the same location?
1949 if ( act->get_location() != location )
1952 //same thread as the promise
1953 if ( act->get_tid()==tid ) {
1955 //do we have a pwrite for the promise, if not, set it
1956 if (promise->get_write() == NULL ) {
1957 promise->set_write(write);
1958 //The pwrite cannot happen before the promise
/* Case 1 (see header comment): write by the promising thread
 * ordered before its own promised read kills the promise */
1959 if (write->happens_before(act) && (write != act)) {
1960 failed_promise = true;
/* Threads reachable from pwrite in mo cannot resolve the promise */
1964 if (mo_graph->checkPromise(write, promise)) {
1965 failed_promise = true;
1970 //Don't do any lookups twice for the same thread
1971 if (promise->has_sync_thread(tid))
/* Case 2: another thread's write mo-after pwrite rules that thread out */
1974 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
1975 if (promise->increment_threads(tid)) {
1976 failed_promise = true;
1984 * Compute the set of writes that may break the current pending release
1985 * sequence. This information is extracted from previous release sequence
1988 * @param curr The current ModelAction. Must be a release sequence fixup
1991 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
1993 if (pending_rel_seqs->empty())
1996 struct release_seq *pending = pending_rel_seqs->back();
1997 for (unsigned int i = 0; i < pending->writes.size(); i++) {
1998 const ModelAction *write = pending->writes[i];
1999 curr->get_node()->add_relseq_break(write);
2002 /* NULL means don't break the sequence; just synchronize */
2003 curr->get_node()->add_relseq_break(NULL);
2007 * Build up an initial set of all past writes that this 'read' action may read
2008 * from. This set is determined by the clock vector's "happens before"
2010 * @param curr is the current ModelAction that we are exploring; it must be a
2013 void ModelChecker::build_reads_from_past(ModelAction *curr)
2015 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
2017 ASSERT(curr->is_read());
2019 ModelAction *last_seq_cst = NULL;
2021 /* Track whether this object has been initialized */
2022 bool initialized = false;
2024 if (curr->is_seqcst()) {
2025 last_seq_cst = get_last_seq_cst(curr);
2026 /* We have to at least see the last sequentially consistent write,
2027 so we are initialized. */
2028 if (last_seq_cst != NULL)
2032 /* Iterate over all threads */
2033 for (i = 0; i < thrd_lists->size(); i++) {
2034 /* Iterate over actions in thread, starting from most recent */
2035 action_list_t *list = &(*thrd_lists)[i];
2036 action_list_t::reverse_iterator rit;
2037 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2038 ModelAction *act = *rit;
2040 /* Only consider 'write' actions */
2041 if (!act->is_write() || act == curr)
2044 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2045 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
/* A sleeping thread may additionally only read from certain writes */
2046 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2047 DEBUG("Adding action to may_read_from:\n");
2048 if (DBG_ENABLED()) {
2052 curr->get_node()->add_read_from(act);
2056 /* Include at most one act per-thread that "happens before" curr */
2057 if (act->happens_before(curr)) {
2065 /** @todo Need a more informative way of reporting errors. */
2066 printf("ERROR: may read from uninitialized atomic\n");
/* Dump the computed may_read_from set when debugging or on error */
2070 if (DBG_ENABLED() || !initialized) {
2071 printf("Reached read action:\n");
2073 printf("Printing may_read_from\n");
2074 curr->get_node()->print_may_read_from();
2075 printf("End printing may_read_from\n");
2079 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
/* Walk back through the RMW chain that produced 'write'; a release write
 * performed while curr's thread was on the sleep set is readable */
2081 Node *prevnode=write->get_node()->get_parent();
2083 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2084 if (write->is_release()&&thread_sleep)
/* A non-RMW write ends the chain */
2086 if (!write->is_rmw()) {
/* Unresolved RMW (reads-from still NULL): cannot walk further */
2089 if (write->get_reads_from()==NULL)
2091 write=write->get_reads_from();
/* Print an action list (e.g., the full trace) along with a rolling hash
 * that makes it easy to compare executions across runs */
2095 static void print_list(action_list_t *list)
2097 action_list_t::iterator it;
2099 printf("---------------------------------------------------------------------\n");
2101 unsigned int hash=0;
2103 for (it = list->begin(); it != list->end(); it++) {
/* Order-sensitive combination of per-action hashes */
2105 hash=hash^(hash<<3)^((*it)->hash());
2107 printf("HASH %u\n", hash);
2108 printf("---------------------------------------------------------------------\n");
2111 #if SUPPORT_MOD_ORDER_DUMP
/* Dump the modification-order graph plus reads-from (red) and
 * sequenced-before (blue) edges to "<filename>.dot" in Graphviz format */
2112 void ModelChecker::dumpGraph(char *filename) {
2114 sprintf(buffer, "%s.dot",filename);
2115 FILE *file=fopen(buffer, "w");
2116 fprintf(file, "digraph %s {\n",filename);
2117 mo_graph->dumpNodes(file);
/* Tracks each thread's most recent action to draw sb edges */
2118 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2120 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2121 ModelAction *action=*it;
2122 if (action->is_read()) {
2123 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2124 if (action->get_reads_from()!=NULL)
2125 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2127 if (thread_array[action->get_tid()] != NULL) {
2128 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2131 thread_array[action->get_tid()]=action;
2133 fprintf(file,"}\n");
2134 model_free(thread_array);
/* Print end-of-execution statistics and (optionally) dump mo graphs */
2139 void ModelChecker::print_summary()
2142 printf("Number of executions: %d\n", num_executions);
2143 printf("Number of feasible executions: %d\n", num_feasible_executions);
2144 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2146 #if SUPPORT_MOD_ORDER_DUMP
2148 char buffername[100];
2149 sprintf(buffername, "exec%04u", num_executions);
2150 mo_graph->dumpGraphToFile(buffername);
2151 sprintf(buffername, "graph%04u", num_executions);
2152 dumpGraph(buffername);
/* Flag traces that ended up infeasible before printing them */
2155 if (!isfinalfeasible())
2156 printf("INFEASIBLE EXECUTION!\n");
2157 print_list(action_trace);
2162 * Add a Thread to the system for the first time. Should only be called once
2164 * @param t The Thread to add
2166 void ModelChecker::add_thread(Thread *t)
/* Register the thread for ID lookup, then make it schedulable */
2168 thread_map->put(id_to_int(t->get_id()), t);
2169 scheduler->add_thread(t);
2173 * Removes a thread from the scheduler.
2174 * @param t The Thread to remove.
2176 void ModelChecker::remove_thread(Thread *t)
/* Only the scheduler forgets the thread; thread_map keeps its ID mapping */
2178 scheduler->remove_thread(t);
2182 * @brief Get a Thread reference by its ID
2183 * @param tid The Thread's ID
2184 * @return A Thread reference
2186 Thread * ModelChecker::get_thread(thread_id_t tid) const
/* Direct lookup in the thread map */
2188 return thread_map->get(id_to_int(tid));
2192 * @brief Get a reference to the Thread in which a ModelAction was executed
2193 * @param act The ModelAction
2194 * @return A Thread reference
2196 Thread * ModelChecker::get_thread(ModelAction *act) const
/* Delegate to the thread-ID overload using the action's executing thread */
2198 return get_thread(act->get_tid());
2202 * Switch from a user-context to the "master thread" context (a.k.a. system
2203 * context). This switch is made with the intention of exploring a particular
2204 * model-checking action (described by a ModelAction object). Must be called
2205 * from a user-thread context.
2207 * @param act The current action that will be explored. May be NULL only if
2208 * trace is exiting via an assertion (see ModelChecker::set_assert and
2209 * ModelChecker::has_asserted).
2210 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2212 int ModelChecker::switch_to_master(ModelAction *act)
2215 Thread *old = thread_current();
/* Publish the action for the master context to explore, mark the calling
 * thread runnable again, then context-switch into the system context.
 * The return value is the status of the swap itself (0 on success). */
2216 set_current_action(act);
2217 old->set_state(THREAD_READY);
2218 return Thread::swap(old, &system_context);
2222 * Takes the next step in the execution, if possible.
2223 * @return Returns true (success) if a step was taken and false otherwise.
/* Fix in this revision: the release-sequence warning message misspelled
 * "sequences" as "seuqences"; corrected below. All other logic unchanged. */
2225 bool ModelChecker::take_step() {
/* Thread of the pending action, or NULL when there is no pending action.
 * NOTE(review): the unguarded curr->get_state() below looks unsafe for
 * curr == NULL, but a guarding `if (curr)` likely exists on a line not
 * visible in this excerpt — confirm against the full file. */
2229 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2231 if (curr->get_state() == THREAD_READY) {
2232 ASSERT(priv->current_action);
/* Explore the pending action; it may nominate the thread to run next */
2234 priv->nextThread = check_current_action(priv->current_action);
2235 priv->current_action = NULL;
/* Retire threads that can make no further progress */
2237 if (curr->is_blocked() || curr->is_complete())
2238 scheduler->remove_thread(curr);
2243 Thread *next = scheduler->next_thread(priv->nextThread);
2245 /* Infeasible -> don't take any more steps */
/* Enforce the user-specified step bound; params.bound == 0 means unbounded */
2249 if (params.bound != 0) {
2250 if (priv->used_sequence_numbers > params.bound) {
2255 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2256 next ? id_to_int(next->get_id()) : -1);
2259 * Launch end-of-execution release sequence fixups only when there are:
2261 * (1) no more user threads to run (or when execution replay chooses
2262 * the 'model_thread')
2263 * (2) pending release sequences
2264 * (3) pending assertions (i.e., data races)
2265 * (4) no pending promises
2267 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2268 isfinalfeasible() && !unrealizedraces.empty()) {
2269 printf("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2270 pending_rel_seqs->size());
/* Queue a model-thread fixup action instead of running a user thread */
2271 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2272 std::memory_order_seq_cst, NULL, VALUE_NONE,
2274 set_current_action(fixup);
2278 /* next == NULL -> don't take any more steps */
2282 next->set_state(THREAD_RUNNING);
2284 if (next->get_pending() != NULL) {
2285 /* restart a pending action */
2286 set_current_action(next->get_pending());
2287 next->set_pending(NULL);
2288 next->set_state(THREAD_READY);
2292 /* Return false only if swap fails with an error */
2293 return (Thread::swap(&system_context, next) == 0);
2296 /** Runs the current execution until there are no more steps to take. */
2297 void ModelChecker::finish_execution() {
/* Drive the current execution to completion: take_step() returns false
 * when no step can be taken (or the context swap fails). */
2300 while (take_step());