9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
21 /** @brief Constructor */
// Builds all model-checker bookkeeping. The per-execution containers use
// SnapshotAlloc so their contents are rolled back when the snapshot is
// restored between explored executions (see reset_to_initial_state()).
// NOTE(review): this listing is sampled; some initializer/body lines are
// not visible here.
22 ModelChecker::ModelChecker(struct model_params params) :
23 /* Initialize default scheduler */
25 scheduler(new Scheduler()),
27 num_feasible_executions(0),
29 earliest_diverge(NULL),
30 action_trace(new action_list_t()),
31 thread_map(new HashTable<int, Thread *, int>()),
32 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
33 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
34 condvar_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
35 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
36 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
37 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
38 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
39 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
40 node_stack(new NodeStack()),
41 mo_graph(new CycleGraph()),
42 failed_promise(false),
43 too_many_reads(false),
45 bad_synchronization(false)
47 /* Allocate this "size" on the snapshotting heap */
48 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv))
49 /* First thread created will have id INITIAL_THREAD_ID */
50 priv->next_thread_id = INITIAL_THREAD_ID;
52 /* Initialize a model-checker thread, for special ModelActions */
// The model thread consumes the first thread id, so user threads start after it.
53 model_thread = new Thread(get_next_id());
54 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
57 /** @brief Destructor */
// Tears down per-execution state: every Thread owned by thread_map, the
// waiter maps, each outstanding Promise, and the snapshot-allocated vectors.
// NOTE(review): the sampled listing omits several delete statements here
// (e.g. for action_trace/obj_map); do not assume this is the full list.
58 ModelChecker::~ModelChecker()
60 for (unsigned int i = 0; i < get_num_threads(); i++)
61 delete thread_map->get(i);
66 delete lock_waiters_map;
67 delete condvar_waiters_map;
// Promises are owned by the checker; free each before (presumably) freeing
// the vector itself.
70 for (unsigned int i = 0; i < promises->size(); i++)
71 delete (*promises)[i];
74 delete pending_rel_seqs;
76 delete thrd_last_action;
83 * Restores user program to initial state and resets all model-checker data
86 void ModelChecker::reset_to_initial_state()
88 DEBUG("+++ Resetting to initial state +++\n");
// Rewind the exploration tree iterator and clear the per-execution
// infeasibility flags before rolling the heap back to the first snapshot.
89 node_stack->reset_execution();
90 failed_promise = false;
91 too_many_reads = false;
92 bad_synchronization = false;
// Restore the snapshotted user-program state to step 0.
94 snapshotObject->backTrackBeforeStep(0);
97 /** @return a thread ID for a new Thread */
// Post-increment: returns the current id and reserves it.
98 thread_id_t ModelChecker::get_next_id()
100 return priv->next_thread_id++;
103 /** @return the number of user threads created during this execution */
// next_thread_id doubles as the count of ids handed out so far.
104 unsigned int ModelChecker::get_num_threads()
106 return priv->next_thread_id;
109 /** @return The currently executing Thread. */
// Thin delegation to the Scheduler, which tracks the running thread.
110 Thread * ModelChecker::get_current_thread()
112 return scheduler->get_current_thread();
115 /** @return a sequence number for a new ModelAction */
// Pre-increment: sequence numbers start after the stored value and are
// strictly increasing within an execution.
116 modelclock_t ModelChecker::get_next_seq_num()
118 return ++priv->used_sequence_numbers;
// @return the exploration-tree Node at the head of the NodeStack.
121 Node * ModelChecker::get_curr_node() {
122 return node_stack->get_head();
126 * @brief Choose the next thread to execute.
128 * This function chooses the next thread that should execute. It can force the
129 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
130 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
131 * The model-checker may have no preference regarding the next thread (i.e.,
132 * when exploring a new execution ordering), in which case this will return
134 * @param curr The current ModelAction. This action might guide the choice of
136 * @return The next thread to run. If the model-checker has no preference, NULL.
138 Thread * ModelChecker::get_next_thread(ModelAction *curr)
// Keep the read and write halves of an RMW adjacent by re-running the
// current thread. (The guarding condition is elided in this listing.)
143 /* Do not split atomic actions. */
145 return thread_current();
146 /* The THREAD_CREATE action points to the created Thread */
147 else if (curr->get_type() == THREAD_CREATE)
148 return (Thread *)curr->get_location();
151 /* Have we completed exploring the preselected path? */
155 /* Else, we are trying to replay an execution */
156 ModelAction *next = node_stack->get_next()->get_action();
// Track the earliest divergence point seen since the last feasible
// execution, for diagnostics printed by next_execution().
158 if (next == diverge) {
159 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
160 earliest_diverge=diverge;
162 Node *nextnode = next->get_node();
163 Node *prevnode = nextnode->get_parent();
164 scheduler->update_sleep_set(prevnode);
166 /* Reached divergence point */
// At the divergence point, try the alternatives recorded in the Node, in
// priority order: misc index, promise set, read-from choice, future value,
// release-sequence break. Each pops the replayed suffix of the NodeStack.
167 if (nextnode->increment_misc()) {
168 /* The next node will try to satisfy a different misc_index values. */
169 tid = next->get_tid();
170 node_stack->pop_restofstack(2);
171 } else if (nextnode->increment_promise()) {
172 /* The next node will try to satisfy a different set of promises. */
173 tid = next->get_tid();
174 node_stack->pop_restofstack(2);
175 } else if (nextnode->increment_read_from()) {
176 /* The next node will read from a different value. */
177 tid = next->get_tid();
178 node_stack->pop_restofstack(2);
179 } else if (nextnode->increment_future_value()) {
180 /* The next node will try to read from a different future value. */
181 tid = next->get_tid();
182 node_stack->pop_restofstack(2);
183 } else if (nextnode->increment_relseq_break()) {
184 /* The next node will try to resolve a release sequence differently */
185 tid = next->get_tid();
186 node_stack->pop_restofstack(2);
// All per-node alternatives exhausted: backtrack instead. Put the thread
// that ran here to sleep and switch to the recorded backtracking thread.
188 /* Make a different thread execute for next step */
189 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
190 tid = prevnode->get_next_backtrack();
191 /* Make sure the backtracked thread isn't sleeping. */
192 node_stack->pop_restofstack(1);
193 if (diverge==earliest_diverge) {
194 earliest_diverge=prevnode->get_action();
197 /* The correct sleep set is in the parent node. */
200 DEBUG("*** Divergence point ***\n");
// Not at the divergence point: keep replaying the recorded schedule.
204 tid = next->get_tid();
206 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
207 ASSERT(tid != THREAD_ID_T_NONE);
208 return thread_map->get(id_to_int(tid));
212 * We need to know what the next actions of all threads in the sleep
213 * set will be. This method computes them and stores the actions at
214 * the corresponding thread object's pending action.
217 void ModelChecker::execute_sleep_set() {
218 for(unsigned int i=0;i<get_num_threads();i++) {
219 thread_id_t tid=int_to_id(i);
220 Thread *thr=get_thread(tid);
// Only compute a pending action for sleeping threads that don't already
// have one cached.
221 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
222 thr->get_pending() == NULL ) {
// Temporarily run the sleeping thread one step so it publishes its next
// ModelAction into priv->current_action, then stash it as pending.
223 thr->set_state(THREAD_RUNNING);
224 scheduler->next_thread(thr);
225 Thread::swap(&system_context, thr);
226 priv->current_action->set_sleep_flag();
227 thr->set_pending(priv->current_action);
// Clear the scratch slot so the probe action is not treated as a real step.
230 priv->current_action = NULL;
// Wake any sleeping thread whose pending action could synchronize with
// curr — its next step is no longer redundant, so it must be re-enabled.
// RMW-read halves (is_rmwr) are skipped; presumably the completed RMW is
// handled when its write half executes — TODO(review) confirm.
233 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
234 for(unsigned int i=0;i<get_num_threads();i++) {
235 thread_id_t tid=int_to_id(i);
236 Thread *thr=get_thread(tid);
237 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
238 ModelAction *pending_act=thr->get_pending();
239 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
240 //Remove this thread from sleep set
241 scheduler->remove_sleep(thr);
248 * Queries the model-checker for more executions to explore and, if one
249 * exists, resets the model-checker state to execute a new execution.
251 * @return If there are more executions to explore, return true. Otherwise,
254 bool ModelChecker::next_execution()
// On a feasible complete execution, report the earliest divergence point
// (diagnostic) and bump the feasible-execution counter.
260 if (isfinalfeasible()) {
261 printf("Earliest divergence point since last feasible execution:\n");
262 if (earliest_diverge)
263 earliest_diverge->print();
265 printf("(Not set)\n");
267 earliest_diverge = NULL;
268 num_feasible_executions++;
271 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
272 pending_rel_seqs->size());
// Dump the trace when feasible, when the step bound was exceeded, or when
// debugging is enabled. (The dump call itself is elided in this listing.)
275 if (isfinalfeasible() || (params.bound != 0 && priv->used_sequence_numbers > params.bound ) || DBG_ENABLED() ) {
// No recorded backtracking point means the search is complete.
280 if ((diverge = get_next_backtrack()) == NULL)
284 printf("Next execution will diverge at:\n");
// Roll everything back so the next execution starts from scratch.
288 reset_to_initial_state();
// Find the most recent prior action (on the same location) that conflicts
// with act, for use as a backtracking point. The notion of "conflict"
// depends on act's type: synchronization for atomics, lock-ordering for
// lock/trylock/unlock, wait/notify pairing for condition variables.
// Searches newest-to-oldest so the first hit is the latest conflict.
292 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
294 switch (act->get_type()) {
298 /* linear search: from most recent to oldest */
299 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
300 action_list_t::reverse_iterator rit;
301 for (rit = list->rbegin(); rit != list->rend(); rit++) {
302 ModelAction *prev = *rit;
303 if (prev->could_synchronize_with(act))
309 case ATOMIC_TRYLOCK: {
310 /* linear search: from most recent to oldest */
311 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
312 action_list_t::reverse_iterator rit;
313 for (rit = list->rbegin(); rit != list->rend(); rit++) {
314 ModelAction *prev = *rit;
// Lock/trylock conflicts with a prior lock-related action on the mutex.
315 if (act->is_conflicting_lock(prev))
320 case ATOMIC_UNLOCK: {
321 /* linear search: from most recent to oldest */
322 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
323 action_list_t::reverse_iterator rit;
324 for (rit = list->rbegin(); rit != list->rend(); rit++) {
325 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock: reordering
// them could let the trylock succeed.
326 if (!act->same_thread(prev)&&prev->is_failed_trylock())
332 /* linear search: from most recent to oldest */
333 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
334 action_list_t::reverse_iterator rit;
335 for (rit = list->rbegin(); rit != list->rend(); rit++) {
336 ModelAction *prev = *rit;
337 if (!act->same_thread(prev)&&prev->is_failed_trylock())
// (wait case) also conflicts with another thread's notify.
339 if (!act->same_thread(prev)&&prev->is_notify())
345 case ATOMIC_NOTIFY_ALL:
346 case ATOMIC_NOTIFY_ONE: {
347 /* linear search: from most recent to oldest */
348 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
349 action_list_t::reverse_iterator rit;
350 for (rit = list->rbegin(); rit != list->rend(); rit++) {
351 ModelAction *prev = *rit;
// A notify conflicts with another thread's wait on the same condvar.
352 if (!act->same_thread(prev)&&prev->is_wait())
363 /** This method finds backtracking points where we should try to
364 * reorder the parameter ModelAction against.
366 * @param the ModelAction to find backtracking points for.
368 void ModelChecker::set_backtracking(ModelAction *act)
370 Thread *t = get_thread(act);
371 ModelAction * prev = get_last_conflict(act);
// Mark backtracking in the Node *before* the conflicting action.
375 Node * node = prev->get_node()->get_parent();
// If act's own thread was enabled at that node, only it needs to be tried
// there; otherwise consider every thread id.
377 int low_tid, high_tid;
378 if (node->is_enabled(t)) {
379 low_tid = id_to_int(act->get_tid());
380 high_tid = low_tid+1;
383 high_tid = get_num_threads();
386 for(int i = low_tid; i < high_tid; i++) {
387 thread_id_t tid = int_to_id(i);
389 /* Make sure this thread can be enabled here. */
390 if (i >= node->get_num_threads())
393 /* Don't backtrack into a point where the thread is disabled or sleeping. */
394 if (node->enabled_status(tid)!=THREAD_ENABLED)
397 /* Check if this has been explored already */
398 if (node->has_been_explored(tid))
401 /* See if fairness allows */
// Under the fairness window, skip tid if some other enabled thread holds
// priority at this node. (Part of the decision logic is elided here.)
402 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
404 for(int t=0;t<node->get_num_threads();t++) {
405 thread_id_t tother=int_to_id(t);
406 if (node->is_enabled(tother) && node->has_priority(tother)) {
414 /* Cache the latest backtracking point */
415 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
416 priv->next_backtrack = prev;
418 /* If this is a new backtracking point, mark the tree */
419 if (!node->set_backtrack(tid))
421 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
422 id_to_int(prev->get_tid()),
423 id_to_int(t->get_id()));
432 * Returns last backtracking point. The model checker will explore a different
433 * path for this point in the next execution.
434 * @return The ModelAction at which the next execution should diverge.
436 ModelAction * ModelChecker::get_next_backtrack()
// Consume-and-clear: the cached point is returned once, then reset so the
// next execution records a fresh one.
438 ModelAction *next = priv->next_backtrack;
439 priv->next_backtrack = NULL;
444 * Processes a read or rmw model action.
445 * @param curr is the read model action to process.
446 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
447 * @return True if processing this read updates the mo_graph.
449 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
451 uint64_t value = VALUE_NONE;
452 bool updated = false;
// The Node records which write this read was chosen to read from on this
// exploration; NULL means read from a "future value" instead.
454 const ModelAction *reads_from = curr->get_node()->get_read_from();
455 if (reads_from != NULL) {
// Stage mo_graph changes so they can be rolled back if infeasible.
456 mo_graph->startChanges();
458 value = reads_from->get_value();
459 bool r_status = false;
// The second half of an RMW already had recency/mo checks on its read half.
461 if (!second_part_of_rmw) {
462 check_recency(curr, reads_from);
463 r_status = r_modification_order(curr, reads_from);
// If this read-from makes the execution infeasible and an alternative
// choice exists, roll back and retry rather than committing.
467 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
468 mo_graph->rollbackChanges();
469 too_many_reads = false;
473 curr->read_from(reads_from);
474 mo_graph->commitChanges();
475 mo_check_promises(curr->get_tid(), reads_from);
478 } else if (!second_part_of_rmw) {
479 /* Read from future value */
// Speculative read: create a Promise that some later write will supply
// this value before the expiration clock.
480 value = curr->get_node()->get_future_value();
481 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
482 curr->read_from(NULL);
483 Promise *valuepromise = new Promise(curr, value, expiration);
484 promises->push_back(valuepromise);
486 get_thread(curr)->set_return_value(value);
492 * Processes a lock, trylock, or unlock model action. @param curr is
493 * the read model action to process.
495 * The try lock operation checks whether the lock is taken. If not,
496 * it falls to the normal lock operation case. If so, it returns
499 * The lock operation has already been checked that it is enabled, so
500 * it just grabs the lock and synchronizes with the previous unlock.
502 * The unlock operation has to re-enable all of the threads that are
503 * waiting on the lock.
505 * @return True if synchronization was updated; false otherwise
507 bool ModelChecker::process_mutex(ModelAction *curr) {
508 std::mutex *mutex=NULL;
509 struct std::mutex_state *state=NULL;
// For lock/trylock/unlock the mutex is the action's location; for wait the
// mutex pointer travels in the action's value field.
511 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
512 mutex = (std::mutex *)curr->get_location();
513 state = mutex->get_state();
514 } else if(curr->is_wait()) {
515 mutex = (std::mutex *)curr->get_value();
516 state = mutex->get_state();
519 switch (curr->get_type()) {
520 case ATOMIC_TRYLOCK: {
// A trylock succeeds iff the lock is currently free; record the outcome
// and the user-visible return value (0/1).
521 bool success = !state->islocked;
522 curr->set_try_lock(success);
524 get_thread(curr)->set_return_value(0);
527 get_thread(curr)->set_return_value(1);
529 //otherwise fall into the lock case
// Sanity check: accessing a lock before its initialization is an error.
531 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
532 printf("Lock access before initialization\n");
535 state->islocked = true;
536 ModelAction *unlock = get_last_unlock(curr);
537 //synchronize with the previous unlock statement
538 if (unlock != NULL) {
539 curr->synchronize_with(unlock);
544 case ATOMIC_UNLOCK: {
546 state->islocked = false;
547 //wake up the other threads
548 action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
549 //activate all the waiting threads
550 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
551 scheduler->wake(get_thread(*rit));
// (wait case) release the mutex and wake its lock-waiters before blocking
// on the condition variable.
558 state->islocked = false;
559 //wake up the other threads
560 action_list_t *waiters = lock_waiters_map->get_safe_ptr((void *) curr->get_value());
561 //activate all the waiting threads
562 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
563 scheduler->wake(get_thread(*rit));
566 //check whether we should go to sleep or not...simulate spurious failures
// misc==0 models a genuine wait; other misc values model spurious wakeup.
567 if (curr->get_node()->get_misc()==0) {
568 condvar_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
570 scheduler->sleep(get_current_thread());
574 case ATOMIC_NOTIFY_ALL: {
575 action_list_t *waiters = condvar_waiters_map->get_safe_ptr(curr->get_location());
576 //activate all the waiting threads
577 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
578 scheduler->wake(get_thread(*rit));
583 case ATOMIC_NOTIFY_ONE: {
// notify_one is nondeterministic: the Node's misc index selects which
// waiter to wake so all choices get explored.
584 action_list_t *waiters = condvar_waiters_map->get_safe_ptr(curr->get_location());
585 int wakeupthread=curr->get_node()->get_misc();
586 action_list_t::iterator it = waiters->begin();
587 advance(it, wakeupthread);
588 scheduler->wake(get_thread(*it));
600 * Process a write ModelAction
601 * @param curr The ModelAction to process
602 * @return True if the mo_graph was updated or promises were resolved
604 bool ModelChecker::process_write(ModelAction *curr)
606 bool updated_mod_order = w_modification_order(curr);
607 bool updated_promises = resolve_promises(curr);
// Once no promises are outstanding, revisit queued future values: with a
// more complete mo_graph we can decide which are actually allowed.
609 if (promises->size() == 0) {
610 for (unsigned int i = 0; i < futurevalues->size(); i++) {
611 struct PendingFutureValue pfv = (*futurevalues)[i];
612 //Do more ambitious checks now that mo is more complete
// Adding a future value creates a new exploration choice; remember the
// latest such action as the next backtracking point.
613 if (mo_may_allow(pfv.writer, pfv.act)&&
614 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
615 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
616 priv->next_backtrack = pfv.act;
618 futurevalues->resize(0);
621 mo_graph->commitChanges();
622 mo_check_promises(curr->get_tid(), curr);
// Writes have no meaningful return value for the user program.
624 get_thread(curr)->set_return_value(VALUE_NONE);
625 return updated_mod_order || updated_promises;
629 * @brief Process the current action for thread-related activity
631 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
632 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
633 * synchronization, etc. This function is a no-op for non-THREAD actions
634 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
636 * @param curr The current action
637 * @return True if synchronization was updated or a thread completed
639 bool ModelChecker::process_thread_action(ModelAction *curr)
641 bool updated = false;
643 switch (curr->get_type()) {
644 case THREAD_CREATE: {
// Location of a THREAD_CREATE is the child Thread object itself.
645 Thread *th = (Thread *)curr->get_location();
646 th->set_creation(curr);
// (join case) synchronize the join with the joined thread's last action so
// happens-before covers everything the child did.
650 Thread *blocking = (Thread *)curr->get_location();
651 ModelAction *act = get_last_action(blocking->get_id());
652 curr->synchronize_with(act);
653 updated = true; /* trigger rel-seq checks */
656 case THREAD_FINISH: {
// Wake every thread blocked in join() on this thread.
657 Thread *th = get_thread(curr);
658 while (!th->wait_list_empty()) {
659 ModelAction *act = th->pop_wait_list();
660 scheduler->wake(get_thread(act));
663 updated = true; /* trigger rel-seq checks */
// (start case, presumably) check promises against the new thread's clock —
// TODO(review): enclosing case label elided in this listing.
667 check_promises(curr->get_tid(), NULL, curr->get_cv());
678 * @brief Process the current action for release sequence fixup activity
680 * Performs model-checker release sequence fixups for the current action,
681 * forcing a single pending release sequence to break (with a given, potential
682 * "loose" write) or to complete (i.e., synchronize). If a pending release
683 * sequence forms a complete release sequence, then we must perform the fixup
684 * synchronization, mo_graph additions, etc.
686 * @param curr The current action; must be a release sequence fixup action
687 * @param work_queue The work queue to which to add work items as they are
690 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The Node carries the chosen "break" write (NULL presumably means force
// the sequence to complete instead). Pop the pending sequence being fixed.
692 const ModelAction *write = curr->get_node()->get_relseq_break();
693 struct release_seq *sequence = pending_rel_seqs->back();
694 pending_rel_seqs->pop_back();
696 ModelAction *acquire = sequence->acquire;
697 const ModelAction *rf = sequence->rf;
698 const ModelAction *release = sequence->release;
702 ASSERT(release->same_thread(rf));
706 * @todo Forcing a synchronization requires that we set
707 * modification order constraints. For instance, we can't allow
708 * a fixup sequence in which two separate read-acquire
709 * operations read from the same sequence, where the first one
710 * synchronizes and the other doesn't. Essentially, we can't
711 * allow any writes to insert themselves between 'release' and
715 /* Must synchronize */
716 if (!acquire->synchronize_with(release)) {
717 set_bad_synchronization();
720 /* Re-check all pending release sequences */
721 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
722 /* Re-check act for mo_graph edges */
723 work_queue->push_back(MOEdgeWorkEntry(acquire));
725 /* propagate synchronization to later actions */
// Walk the trace backwards from the newest action down to 'acquire',
// re-synchronizing anything that happens-after the acquire.
726 action_list_t::reverse_iterator rit = action_trace->rbegin();
727 for (; (*rit) != acquire; rit++) {
728 ModelAction *propagate = *rit;
729 if (acquire->happens_before(propagate)) {
730 propagate->synchronize_with(acquire);
731 /* Re-check 'propagate' for mo_graph edges */
732 work_queue->push_back(MOEdgeWorkEntry(propagate));
736 /* Break release sequence with new edges:
737 * release --mo--> write --mo--> rf */
// Interposing 'write' between release and rf in modification order is what
// breaks the release sequence.
738 mo_graph->addEdge(release, write);
739 mo_graph->addEdge(write, rf);
742 /* See if we have realized a data race */
743 if (checkDataRaces())
748 * Initialize the current action by performing one or more of the following
749 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
750 * in the NodeStack, manipulating backtracking sets, allocating and
751 * initializing clock vectors, and computing the promises to fulfill.
753 * @param curr The current action, as passed from the user context; may be
754 * freed/invalidated after the execution of this function, with a different
755 * action "returned" its place (pass-by-reference)
756 * @return True if curr is a newly-explored action; false otherwise
758 bool ModelChecker::initialize_curr_action(ModelAction **curr)
760 ModelAction *newcurr;
// RMW completion (RMW/RMWC) merges into the earlier RMWR action instead of
// occupying its own NodeStack slot.
762 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
763 newcurr = process_rmw(*curr);
766 if (newcurr->is_rmw())
767 compute_promises(newcurr);
// Non-RMW-completion path: assign a sequence number and step the NodeStack,
// which either revisits a previously-explored action or records a new one.
773 (*curr)->set_seq_number(get_next_seq_num());
775 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
777 /* First restore type and order in case of RMW operation */
778 if ((*curr)->is_rmwr())
779 newcurr->copy_typeandorder(*curr);
781 ASSERT((*curr)->get_location() == newcurr->get_location());
782 newcurr->copy_from_new(*curr);
784 /* Discard duplicate ModelAction; use action from NodeStack */
787 /* Always compute new clock vector */
788 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
791 return false; /* Action was explored previously */
795 /* Always compute new clock vector */
796 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
798 * Perform one-time actions when pushing new ModelAction onto
// Newly-explored actions get their choice sets populated: promises for
// writes, break-writes for relseq fixups, misc bounds for wait/notify_one.
801 if (newcurr->is_write())
802 compute_promises(newcurr);
803 else if (newcurr->is_relseq_fixup())
804 compute_relseq_breakwrites(newcurr);
805 else if (newcurr->is_wait())
806 newcurr->get_node()->set_misc_max(2);
807 else if (newcurr->is_notify_one()) {
808 newcurr->get_node()->set_misc_max(condvar_waiters_map->get_safe_ptr(newcurr->get_location())->size());
810 return true; /* This was a new ModelAction */
815 * @brief Check whether a model action is enabled.
817 * Checks whether a lock or join operation would be successful (i.e., is the
818 * lock already locked, or is the joined thread already complete). If not, put
819 * the action in a waiter list.
821 * @param curr is the ModelAction to check whether it is enabled.
822 * @return a bool that indicates whether the action is enabled.
824 bool ModelChecker::check_action_enabled(ModelAction *curr) {
825 if (curr->is_lock()) {
826 std::mutex * lock = (std::mutex *)curr->get_location();
827 struct std::mutex_state * state = lock->get_state();
// A contended lock blocks: queue the action so the unlock can wake it.
828 if (state->islocked) {
829 //Stick the action in the appropriate waiting queue
830 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
833 } else if (curr->get_type() == THREAD_JOIN) {
// Joining an unfinished thread blocks on that thread's wait list; the
// THREAD_FINISH handler wakes it.
834 Thread *blocking = (Thread *)curr->get_location();
835 if (!blocking->is_complete()) {
836 blocking->push_wait_list(curr);
845 * This is the heart of the model checker routine. It performs model-checking
846 * actions corresponding to a given "current action." Among other processes, it
847 * calculates reads-from relationships, updates synchronization clock vectors,
848 * forms a memory_order constraints graph, and handles replay/backtrack
849 * execution when running permutations of previously-observed executions.
851 * @param curr The current action to process
852 * @return The next Thread that must be executed. May be NULL if ModelChecker
853 * makes no choice (e.g., according to replay execution, combining RMW actions,
856 Thread * ModelChecker::check_current_action(ModelAction *curr)
859 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
// Blocked lock/join: defer the action as pending, put the thread to sleep,
// and let the scheduler pick someone else.
861 if (!check_action_enabled(curr)) {
862 /* Make the execution look like we chose to run this action
863 * much later, when a lock/join can succeed */
864 get_current_thread()->set_pending(curr);
865 scheduler->sleep(get_current_thread());
866 return get_next_thread(NULL);
// curr may be replaced here by the canonical action from the NodeStack.
869 bool newly_explored = initialize_curr_action(&curr);
871 wake_up_sleeping_actions(curr);
873 /* Add the action to lists before any other model-checking tasks */
874 if (!second_part_of_rmw)
875 add_action_to_lists(curr);
877 /* Build may_read_from set for newly-created actions */
878 if (newly_explored && curr->is_read())
879 build_reads_from_past(curr);
881 /* Initialize work_queue with the "current action" work */
// Fixed-point loop: each processing step may enqueue follow-up work
// (release-sequence re-checks, mo_graph edge re-checks) until quiescent.
882 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
883 while (!work_queue.empty()) {
884 WorkQueueEntry work = work_queue.front();
885 work_queue.pop_front();
888 case WORK_CHECK_CURR_ACTION: {
889 ModelAction *act = work.action;
890 bool update = false; /* update this location's release seq's */
891 bool update_all = false; /* update all release seq's */
893 if (process_thread_action(curr))
896 if (act->is_read() && process_read(act, second_part_of_rmw))
899 if (act->is_write() && process_write(act))
902 if (act->is_mutex_op() && process_mutex(act))
905 if (act->is_relseq_fixup())
906 process_relseq_fixup(curr, &work_queue);
// NULL location means "re-check every pending release sequence".
909 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
911 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
914 case WORK_CHECK_RELEASE_SEQ:
915 resolve_release_sequences(work.location, &work_queue);
917 case WORK_CHECK_MO_EDGES: {
918 /** @todo Complete verification of work_queue */
919 ModelAction *act = work.action;
920 bool updated = false;
922 if (act->is_read()) {
923 const ModelAction *rf = act->get_reads_from();
// rf may still be NULL for an unresolved promise-read; skip mo edges then.
924 if (rf != NULL && r_modification_order(act, rf))
927 if (act->is_write()) {
928 if (w_modification_order(act))
931 mo_graph->commitChanges();
934 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// Finally record backtracking choices for this action and pick who runs next.
943 check_curr_backtracking(curr);
944 set_backtracking(curr);
945 return get_next_thread(curr);
// If curr's node (or its parent) still has unexplored alternatives of any
// kind — backtrack threads, misc indices, read-from choices, future values,
// promises, or relseq breaks — and curr is later than the currently cached
// backtracking point, make curr the next backtracking point.
948 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
949 Node *currnode = curr->get_node();
950 Node *parnode = currnode->get_parent();
952 if ((!parnode->backtrack_empty() ||
953 !currnode->misc_empty() ||
954 !currnode->read_from_empty() ||
955 !currnode->future_value_empty() ||
956 !currnode->promise_empty() ||
957 !currnode->relseq_break_empty())
958 && (!priv->next_backtrack ||
959 *curr > *priv->next_backtrack)) {
960 priv->next_backtrack = curr;
// @return true if any outstanding Promise's expiration clock has already
// passed (the promised write can no longer arrive in time).
964 bool ModelChecker::promises_expired() {
965 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
966 Promise *promise = (*promises)[promise_index];
967 if (promise->get_expiration()<priv->used_sequence_numbers) {
974 /** @return whether the current partial trace must be a prefix of a
// Feasible-prefix additionally requires no unresolved promises and no
// pending release sequences.
976 bool ModelChecker::isfeasibleprefix() {
977 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
980 /** @return whether the current partial trace is feasible. */
// RMW violation = two RMWs reading from the same store; the DEBUG line only
// fires when debugging is enabled, the check itself always runs.
981 bool ModelChecker::isfeasible() {
982 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
983 DEBUG("Infeasible: RMW violation\n");
985 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
988 /** @return whether the current partial trace is feasible other than
989 * multiple RMW reading from the same store. */
// The DEBUG lines report *why* a trace is infeasible; the return expression
// is the authoritative check (mo cycles, failed/expired promises, excessive
// same-value reads, bad synchronization).
990 bool ModelChecker::isfeasibleotherthanRMW() {
992 if (mo_graph->checkForCycles())
993 DEBUG("Infeasible: modification order cycles\n");
995 DEBUG("Infeasible: failed promise\n");
997 DEBUG("Infeasible: too many reads\n");
998 if (bad_synchronization)
999 DEBUG("Infeasible: bad synchronization ordering\n");
1000 if (promises_expired())
1001 DEBUG("Infeasible: promises expired\n");
1003 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1006 /** Returns whether the current completed trace is feasible. */
// A completed trace must additionally have resolved every promise.
1007 bool ModelChecker::isfinalfeasible() {
1008 if (DBG_ENABLED() && promises->size() != 0)
1009 DEBUG("Infeasible: unrevolved promises\n");
1011 return isfeasible() && promises->size() == 0;
1014 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
// Merges the current RMW/RMWC action into this thread's last action (the
// matching RMWR). For a successful RMW with a resolved reads-from, adds the
// RMW atomicity edge to the mo_graph. Presumably returns the merged action —
// TODO(review): return statement elided in this listing.
1015 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1016 ModelAction *lastread = get_last_action(act->get_tid());
1017 lastread->process_rmw(act);
1018 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1019 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1020 mo_graph->commitChanges();
1026 * Checks whether a thread has read from the same write for too many times
1027 * without seeing the effects of a later write.
1030 * 1) there must a different write that we could read from that would satisfy the modification order,
1031 * 2) we must have read from the same value in excess of maxreads times, and
1032 * 3) that other write must have been in the reads_from set for maxreads times.
1034 * If so, we decide that the execution is no longer feasible.
1036 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
// Only active when the user set a maxreads bound.
1037 if (params.maxreads != 0) {
// With <=1 possible write to read from there is no alternative anyway.
1039 if (curr->get_node()->get_read_from_size() <= 1)
1041 //Must make sure that execution is currently feasible... We could
1042 //accidentally clear by rolling back
1045 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1046 int tid = id_to_int(curr->get_tid());
1049 if ((int)thrd_lists->size() <= tid)
1051 action_list_t *list = &(*thrd_lists)[tid];
1053 action_list_t::reverse_iterator rit = list->rbegin();
1054 /* Skip past curr */
1055 for (; (*rit) != curr; rit++)
1057 /* go past curr now */
// Keep a copy of the iterator so the same window can be re-walked below.
1060 action_list_t::reverse_iterator ritcopy = rit;
1061 //See if we have enough reads from the same value
// Bail out early if any of the previous maxreads actions is not a read of
// the same write with alternative choices available.
1063 for (; count < params.maxreads; rit++,count++) {
1064 if (rit==list->rend())
1066 ModelAction *act = *rit;
1067 if (!act->is_read())
1070 if (act->get_reads_from() != rf)
1072 if (act->get_node()->get_read_from_size() <= 1)
// For each candidate alternative write, check it is (a) a different write,
// (b) feasible under modification order, and (c) available to every read
// in the window. If one qualifies, flag too_many_reads.
1075 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1077 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1079 //Need a different write
1083 /* Test to see whether this is a feasible write to read from*/
// Stage-and-rollback: the mo edges are only probed, never committed here.
1084 mo_graph->startChanges();
1085 r_modification_order(curr, write);
1086 bool feasiblereadfrom = isfeasible();
1087 mo_graph->rollbackChanges();
1089 if (!feasiblereadfrom)
1093 bool feasiblewrite = true;
1094 //new we need to see if this write works for everyone
1096 for (int loop = count; loop>0; loop--,rit++) {
1097 ModelAction *act=*rit;
1098 bool foundvalue = false;
1099 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1100 if (act->get_node()->get_read_from_at(j)==write) {
1106 feasiblewrite = false;
1110 if (feasiblewrite) {
// Declare the execution infeasible: the thread ignored a viable newer write.
1111 too_many_reads = true;
1119 * Updates the mo_graph with the constraints imposed from the current
1122 * Basic idea is the following: Go through each other thread and find
1123 * the lastest action that happened before our read. Two cases:
1125 * (1) The action is a write => that write must either occur before
1126 * the write we read from or be the write we read from.
1128 * (2) The action is a read => the write that that action read from
1129 * must occur before the write we read from or be the same write.
1131 * @param curr The current action. Must be a read.
1132 * @param rf The action that curr reads from. Must be a write.
1133 * @return True if modification order edges were added; false otherwise
1135 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1137 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1140 ASSERT(curr->is_read());
1142 /* Iterate over all threads */
1143 for (i = 0; i < thrd_lists->size(); i++) {
1144 /* Iterate over actions in thread, starting from most recent */
1145 action_list_t *list = &(*thrd_lists)[i];
1146 action_list_t::reverse_iterator rit;
1147 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1148 ModelAction *act = *rit;
1151 * Include at most one act per-thread that "happens
1152 * before" curr. Don't consider reflexively.
1154 if (act->happens_before(curr) && act != curr) {
1155 if (act->is_write()) {
// Case (1): a hb-earlier write must be mo-before rf.
1157 mo_graph->addEdge(act, rf);
// Case (2): a hb-earlier read's source write must be mo-before rf
// (unless it's rf itself).
1161 const ModelAction *prevreadfrom = act->get_reads_from();
1162 //if the previous read is unresolved, keep going...
1163 if (prevreadfrom == NULL)
1166 if (rf != prevreadfrom) {
1167 mo_graph->addEdge(prevreadfrom, rf);
1179 /** This method fixes up the modification order when we resolve a
1180 * promise. The basic problem is that actions that occur after the
1181 * read curr could not properly add items to the modification order
1184 * So for each thread, we find the earliest item that happens after
1185 * the read curr. This is the item we have to fix up with additional
1186 * constraints. If that action is a write, we add a MO edge between
1187 * the Action rf and that action. If the action is a read, we add a
1188 * MO edge between the Action rf, and whatever the read accessed.
1190 * @param curr is the read ModelAction that we are fixing up MO edges for.
1191 * @param rf is the write ModelAction that curr reads from.
1194 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1196 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1198 ASSERT(curr->is_read());
1200 /* Iterate over all threads */
1201 for (i = 0; i < thrd_lists->size(); i++) {
1202 /* Iterate over actions in thread, starting from most recent */
1203 action_list_t *list = &(*thrd_lists)[i];
1204 action_list_t::reverse_iterator rit;
1205 ModelAction *lastact = NULL;
1207 /* Find last action that happens after curr that is either not curr or a rmw */
1208 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1209 ModelAction *act = *rit;
1210 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1216 /* Include at most one act per-thread that "happens before" curr */
1217 if (lastact != NULL) {
1218 if (lastact==curr) {
1219 //Case 1: The resolved read is a RMW, and we need to make sure
1220 //that the write portion of the RMW is mod ordered after rf
1222 mo_graph->addEdge(rf, lastact);
1223 } else if (lastact->is_read()) {
1224 //Case 2: The resolved read is a normal read and the next
1225 //operation is a read, and we need to make sure the value read
1226 //is mod ordered after rf
1228 const ModelAction *postreadfrom = lastact->get_reads_from();
1229 if (postreadfrom != NULL&&rf != postreadfrom)
1230 mo_graph->addEdge(rf, postreadfrom);
1232 //Case 3: The resolved read is a normal read and the next
1233 //operation is a write, and we need to make sure that the
1234 //write is mod ordered after rf
1236 mo_graph->addEdge(rf, lastact);
1244 * Updates the mo_graph with the constraints imposed from the current write.
1246 * Basic idea is the following: Go through each other thread and find
1247 * the latest action that happened before our write. Two cases:
1249 * (1) The action is a write => that write must occur before
1252 * (2) The action is a read => the write that that action read from
1253 * must occur before the current write.
1255 * This method also handles two other issues:
1257 * (I) Sequential Consistency: Making sure that if the current write is
1258 * seq_cst, that it occurs after the previous seq_cst write.
1260 * (II) Sending the write back to non-synchronizing reads.
1262 * @param curr The current action. Must be a write.
1263 * @return True if modification order edges were added; false otherwise
1265 bool ModelChecker::w_modification_order(ModelAction *curr)
1267 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1270 ASSERT(curr->is_write());
1272 if (curr->is_seqcst()) {
1273 /* We have to at least see the last sequentially consistent write,
1274 so we are initialized. */
1275 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1276 if (last_seq_cst != NULL) {
1277 mo_graph->addEdge(last_seq_cst, curr);
1282 /* Iterate over all threads */
1283 for (i = 0; i < thrd_lists->size(); i++) {
1284 /* Iterate over actions in thread, starting from most recent */
1285 action_list_t *list = &(*thrd_lists)[i];
1286 action_list_t::reverse_iterator rit;
1287 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1288 ModelAction *act = *rit;
1291 * 1) If RMW and it actually read from something, then we
1292 * already have all relevant edges, so just skip to next
1295 * 2) If RMW and it didn't read from anything, we should add
1296 * whatever edge we can get to speed up convergence.
1298 * 3) If normal write, we need to look at earlier actions, so
1299 * continue processing list.
1301 if (curr->is_rmw()) {
1302 if (curr->get_reads_from()!=NULL)
1311 * Include at most one act per-thread that "happens
1314 if (act->happens_before(curr)) {
1316 * Note: if act is RMW, just add edge:
1318 * The following edge should be handled elsewhere:
1319 * readfrom(act) --mo--> act
1321 if (act->is_write())
1322 mo_graph->addEdge(act, curr);
1323 else if (act->is_read()) {
1324 //if previous read accessed a null, just keep going
1325 if (act->get_reads_from() == NULL)
1327 mo_graph->addEdge(act->get_reads_from(), curr);
1331 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1332 !act->same_thread(curr)) {
1333 /* We have an action that:
1334 (1) did not happen before us
1335 (2) is a read and we are a write
1336 (3) cannot synchronize with us
1337 (4) is in a different thread
1339 that read could potentially read from our write. Note that
1340 these checks are overly conservative at this point, we'll
1341 do more checks before actually removing the
/* Record curr as a potential "future value" that act may later read */
1345 if (thin_air_constraint_may_allow(curr, act)) {
1347 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1348 struct PendingFutureValue pfv = {curr,act};
1349 futurevalues->push_back(pfv);
1359 /** Arbitrary reads from the future are not allowed. Section 29.3
1360 * part 9 places some constraints. This method checks one result of that
1361 * constraint. Others require compiler support. */
1362 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1363 if (!writer->is_rmw())
1366 if (!reader->is_rmw())
/* Walk the RMW reads-from chain backwards from the writer; if it reaches
 * the reader (or an action hb-ordered before it in the same thread), the
 * future read would create a thin-air cycle. */
1369 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1370 if (search == reader)
1372 if (search->get_tid() == reader->get_tid() &&
1373 search->happens_before(reader))
1381 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1382 * some constraints. This method checks one of the following constraints (others
1383 * require compiler support):
1385 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1387 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1389 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(reader->get_location());
1391 /* Iterate over all threads */
1392 for (i = 0; i < thrd_lists->size(); i++) {
1393 const ModelAction *write_after_read = NULL;
1395 /* Iterate over actions in thread, starting from most recent */
1396 action_list_t *list = &(*thrd_lists)[i];
1397 action_list_t::reverse_iterator rit;
1398 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1399 ModelAction *act = *rit;
/* Find the earliest write (direct or via a read's reads-from) that the
 * reader happens-before in this thread — this is "Y"/"Z" in the rule. */
1401 if (!reader->happens_before(act))
1403 else if (act->is_write())
1404 write_after_read = act;
1405 else if (act->is_read()&&act->get_reads_from()!=NULL&&act!=reader) {
1406 write_after_read = act->get_reads_from();
1410 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1417 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1418 * The ModelAction under consideration is expected to be taking part in
1419 * release/acquire synchronization as an object of the "reads from" relation.
1420 * Note that this can only provide release sequence support for RMW chains
1421 * which do not read from the future, as those actions cannot be traced until
1422 * their "promise" is fulfilled. Similarly, we may not even establish the
1423 * presence of a release sequence with certainty, as some modification order
1424 * constraints may be decided further in the future. Thus, this function
1425 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1426 * and a boolean representing certainty.
1428 * @param rf The action that might be part of a release sequence. Must be a
1430 * @param release_heads A pass-by-reference style return parameter. After
1431 * execution of this function, release_heads will contain the heads of all the
1432 * relevant release sequences, if any exist with certainty
1433 * @param pending A pass-by-reference style return parameter which is only used
1434 * when returning false (i.e., uncertain). Returns most information regarding
1435 * an uncertain release sequence, including any write operations that might
1436 * break the sequence.
1437 * @return true, if the ModelChecker is certain that release_heads is complete;
1440 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1441 rel_heads_list_t *release_heads,
1442 struct release_seq *pending) const
1444 /* Only check for release sequences if there are no cycles */
1445 if (mo_graph->checkForCycles())
1449 ASSERT(rf->is_write());
/* Walk backwards through the RMW chain, collecting release heads */
1451 if (rf->is_release())
1452 release_heads->push_back(rf);
1454 break; /* End of RMW chain */
1456 /** @todo Need to be smarter here... In the linux lock
1457 * example, this will run to the beginning of the program for
1459 /** @todo The way to be smarter here is to keep going until 1
1460 * thread has a release preceded by an acquire and you've seen
1463 /* acq_rel RMW is a sufficient stopping condition */
1464 if (rf->is_acquire() && rf->is_release())
1465 return true; /* complete */
1467 rf = rf->get_reads_from();
1470 /* read from future: need to settle this later */
1472 return false; /* incomplete */
1475 if (rf->is_release())
1476 return true; /* complete */
1478 /* else relaxed write; check modification order for contiguous subsequence
1479 * -> rf must be same thread as release */
1480 int tid = id_to_int(rf->get_tid());
1481 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
1482 action_list_t *list = &(*thrd_lists)[tid];
1483 action_list_t::const_reverse_iterator rit;
1485 /* Find rf in the thread list */
1486 rit = std::find(list->rbegin(), list->rend(), rf);
1487 ASSERT(rit != list->rend());
1489 /* Find the last write/release */
1490 for (; rit != list->rend(); rit++)
1491 if ((*rit)->is_release())
1493 if (rit == list->rend()) {
1494 /* No write-release in this thread */
1495 return true; /* complete */
1497 ModelAction *release = *rit;
1499 ASSERT(rf->same_thread(release));
1501 pending->writes.clear();
/* Check every other thread for writes that could intervene between
 * 'release' and 'rf' in modification order, breaking the sequence */
1503 bool certain = true;
1504 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1505 if (id_to_int(rf->get_tid()) == (int)i)
1507 list = &(*thrd_lists)[i];
1509 /* Can we ensure no future writes from this thread may break
1510 * the release seq? */
1511 bool future_ordered = false;
1513 ModelAction *last = get_last_action(int_to_id(i));
1514 Thread *th = get_thread(int_to_id(i));
1515 if ((last && rf->happens_before(last)) ||
1516 !scheduler->is_enabled(th) ||
1518 future_ordered = true;
1520 ASSERT(!th->is_model_thread() || future_ordered);
1522 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1523 const ModelAction *act = *rit;
1524 /* Reach synchronization -> this thread is complete */
1525 if (act->happens_before(release))
1527 if (rf->happens_before(act)) {
1528 future_ordered = true;
1532 /* Only non-RMW writes can break release sequences */
1533 if (!act->is_write() || act->is_rmw())
1536 /* Check modification order */
1537 if (mo_graph->checkReachable(rf, act)) {
1538 /* rf --mo--> act */
1539 future_ordered = true;
1542 if (mo_graph->checkReachable(act, release))
1543 /* act --mo--> release */
1545 if (mo_graph->checkReachable(release, act) &&
1546 mo_graph->checkReachable(act, rf)) {
1547 /* release --mo-> act --mo--> rf */
1548 return true; /* complete */
1550 /* act may break release sequence */
1551 pending->writes.push_back(act);
1554 if (!future_ordered)
1555 certain = false; /* This thread is uncertain */
/* Certain: report the release head; uncertain: stash state in 'pending' */
1559 release_heads->push_back(release);
1560 pending->writes.clear();
1562 pending->release = release;
1569 * A public interface for getting the release sequence head(s) with which a
1570 * given ModelAction must synchronize. This function only returns a non-empty
1571 * result when it can locate a release sequence head with certainty. Otherwise,
1572 * it may mark the internal state of the ModelChecker so that it will handle
1573 * the release sequence at a later time, causing @a act to update its
1574 * synchronization at some later point in execution.
1575 * @param act The 'acquire' action that may read from a release sequence
1576 * @param release_heads A pass-by-reference return parameter. Will be filled
1577 * with the head(s) of the release sequence(s), if they exist with certainty.
1578 * @see ModelChecker::release_seq_heads
1580 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1582 const ModelAction *rf = act->get_reads_from();
/* snapshot_calloc: allocated on the snapshotting heap so it is rolled back
 * with the rest of the execution state */
1583 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1584 sequence->acquire = act;
1586 if (!release_seq_heads(rf, release_heads, sequence)) {
1587 /* add act to 'lazy checking' list */
1588 pending_rel_seqs->push_back(sequence);
1590 snapshot_free(sequence);
1595 * Attempt to resolve all stashed operations that might synchronize with a
1596 * release sequence for a given location. This implements the "lazy" portion of
1597 * determining whether or not a release sequence was contiguous, since not all
1598 * modification order information is present at the time an action occurs.
1600 * @param location The location/object that should be checked for release
1601 * sequence resolutions. A NULL value means to check all locations.
1602 * @param work_queue The work queue to which to add work items as they are
1604 * @return True if any updates occurred (new synchronization, new mo_graph
1607 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1609 bool updated = false;
1610 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1611 while (it != pending_rel_seqs->end()) {
1612 struct release_seq *pending = *it;
1613 ModelAction *act = pending->acquire;
1615 /* Only resolve sequences on the given location, if provided */
1616 if (location && act->get_location() != location) {
1621 const ModelAction *rf = act->get_reads_from();
1622 rel_heads_list_t release_heads;
1624 complete = release_seq_heads(rf, &release_heads, pending);
/* Synchronize the acquire with every newly-certain release head */
1625 for (unsigned int i = 0; i < release_heads.size(); i++) {
1626 if (!act->has_synchronized_with(release_heads[i])) {
1627 if (act->synchronize_with(release_heads[i]))
1630 set_bad_synchronization();
1635 /* Re-check all pending release sequences */
1636 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1637 /* Re-check act for mo_graph edges */
1638 work_queue->push_back(MOEdgeWorkEntry(act));
1640 /* propagate synchronization to later actions */
1641 action_list_t::reverse_iterator rit = action_trace->rbegin();
1642 for (; (*rit) != act; rit++) {
1643 ModelAction *propagate = *rit;
1644 if (act->happens_before(propagate)) {
1645 propagate->synchronize_with(act);
1646 /* Re-check 'propagate' for mo_graph edges */
1647 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Completed sequences are removed from the pending list and freed */
1652 it = pending_rel_seqs->erase(it);
1653 snapshot_free(pending);
1659 // If we resolved promises or data races, see if we have realized a data race.
1660 if (checkDataRaces()) {
1668 * Performs various bookkeeping operations for the current ModelAction. For
1669 * instance, adds action to the per-object, per-thread action vector and to the
1670 * action trace list of all thread actions.
1672 * @param act is the ModelAction to add.
1674 void ModelChecker::add_action_to_lists(ModelAction *act)
1676 int tid = id_to_int(act->get_tid());
1677 action_trace->push_back(act);
1679 obj_map->get_safe_ptr(act->get_location())->push_back(act);
1681 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
1682 if (tid >= (int)vec->size())
1683 vec->resize(priv->next_thread_id);
1684 (*vec)[tid].push_back(act);
1686 if ((int)thrd_last_action->size() <= tid)
1687 thrd_last_action->resize(get_num_threads());
1688 (*thrd_last_action)[tid] = act;
/* Wait actions are additionally indexed under their mutex; the mutex
 * address is carried in the action's value field. */
1690 if (act->is_wait()) {
1691 void *mutex_loc=(void *) act->get_value();
1692 obj_map->get_safe_ptr(mutex_loc)->push_back(act);
1694 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(mutex_loc);
1695 if (tid >= (int)vec->size())
1696 vec->resize(priv->next_thread_id);
1697 (*vec)[tid].push_back(act);
1699 if ((int)thrd_last_action->size() <= tid)
1700 thrd_last_action->resize(get_num_threads());
1701 (*thrd_last_action)[tid] = act;
1706 * @brief Get the last action performed by a particular Thread
1707 * @param tid The thread ID of the Thread in question
1708 * @return The last action in the thread
1710 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1712 int threadid = id_to_int(tid);
1713 if (threadid < (int)thrd_last_action->size())
1714 return (*thrd_last_action)[id_to_int(tid)];
/* NOTE(review): the out-of-range return path is elided from this listing —
 * presumably returns NULL; confirm against the full source. */
1720 * Gets the last memory_order_seq_cst write (in the total global sequence)
1721 * performed on a particular object (i.e., memory location), not including the
1723 * @param curr The current ModelAction; also denotes the object location to
1725 * @return The last seq_cst write
1727 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1729 void *location = curr->get_location();
1730 action_list_t *list = obj_map->get_safe_ptr(location);
1731 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1732 action_list_t::reverse_iterator rit;
/* Reverse scan: the first match is the most recent seq_cst write */
1733 for (rit = list->rbegin(); rit != list->rend(); rit++)
1734 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1740 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1741 * location). This function identifies the mutex according to the current
1742 * action, which is presumed to perform on the same mutex.
1743 * @param curr The current ModelAction; also denotes the object location to
1745 * @return The last unlock operation
1747 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1749 void *location = curr->get_location();
1750 action_list_t *list = obj_map->get_safe_ptr(location);
1751 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1752 action_list_t::reverse_iterator rit;
/* Wait actions release the mutex too, so they count as "unlock" here */
1753 for (rit = list->rbegin(); rit != list->rend(); rit++)
1754 if ((*rit)->is_unlock() || (*rit)->is_wait())
/**
 * @brief Get the "parent" action for a thread
 * @param tid The thread in question
 * @return The thread's last action, falling back to the action that created
 * the thread when it has not yet performed any actions of its own
 */
1759 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1761 ModelAction *parent = get_last_action(tid);
/* No actions yet -> use the thread-creation action as the parent */
1763 parent = get_thread(tid)->get_creation();
1768 * Returns the clock vector for a given thread.
1769 * @param tid The thread whose clock vector we want
1770 * @return Desired clock vector
1772 ClockVector * ModelChecker::get_cv(thread_id_t tid)
/* The thread's clock vector is that of its parent action */
1774 return get_parent_action(tid)->get_cv();
1778 * Resolve a set of Promises with a current write. The set is provided in the
1779 * Node corresponding to @a write.
1780 * @param write The ModelAction that is fulfilling Promises
1781 * @return True if promises were resolved; false otherwise
1783 bool ModelChecker::resolve_promises(ModelAction *write)
1785 bool resolved = false;
1786 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
/* i indexes the Node's promise set; promise_index tracks the (shrinking)
 * promises vector as resolved entries are erased */
1788 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1789 Promise *promise = (*promises)[promise_index];
1790 if (write->get_node()->get_promise(i)) {
1791 ModelAction *read = promise->get_action();
1792 if (read->is_rmw()) {
1793 mo_graph->addRMWEdge(write, read);
1795 read->read_from(write);
1796 //First fix up the modification order for actions that happened
1798 r_modification_order(read, write);
1799 //Next fix up the modification order for actions that happened
1801 post_r_modification_order(read, write);
1802 //Make sure the promise's value matches the write's value
1803 ASSERT(promise->get_value() == write->get_value());
1806 promises->erase(promises->begin() + promise_index);
1807 threads_to_check.push_back(read->get_tid());
1814 //Check whether reading these writes has made threads unable to
1817 for(unsigned int i=0;i<threads_to_check.size();i++)
1818 mo_check_promises(threads_to_check[i], write);
1824 * Compute the set of promises that could potentially be satisfied by this
1825 * action. Note that the set computation actually appears in the Node, not in
1827 * @param curr The ModelAction that may satisfy promises
1829 void ModelChecker::compute_promises(ModelAction *curr)
1831 for (unsigned int i = 0; i < promises->size(); i++) {
1832 Promise *promise = (*promises)[i];
1833 const ModelAction *act = promise->get_action();
/* curr may satisfy the promise only if it is unordered w.r.t. the
 * promised read, in a different thread, and writes the promised value
 * to the same location */
1834 if (!act->happens_before(curr) &&
1836 !act->could_synchronize_with(curr) &&
1837 !act->same_thread(curr) &&
1838 act->get_location() == curr->get_location() &&
1839 promise->get_value() == curr->get_value()) {
1840 curr->get_node()->set_promise(i, act->is_rmw());
1845 /** Checks promises in response to change in ClockVector Threads. */
1846 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1848 for (unsigned int i = 0; i < promises->size(); i++) {
1849 Promise *promise = (*promises)[i];
1850 const ModelAction *act = promise->get_action();
/* Thread tid just became synchronized-after the promised read; it can no
 * longer fulfill the promise */
1851 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1852 merge_cv->synchronized_since(act)) {
1853 if (promise->increment_threads(tid)) {
1854 //Promise has failed
1855 failed_promise = true;
/** @brief Re-check all pending promises after a thread has been disabled;
 * a promise with no remaining thread able to fulfill it has failed. */
1862 void ModelChecker::check_promises_thread_disabled() {
1863 for (unsigned int i = 0; i < promises->size(); i++) {
1864 Promise *promise = (*promises)[i];
1865 if (promise->check_promise()) {
1866 failed_promise = true;
1872 /** Checks promises in response to addition to modification order for threads.
1874 * pthread is the thread that performed the read that created the promise
1876 * pread is the read that created the promise
1878 * pwrite is either the first write to same location as pread by
1879 * pthread that is sequenced after pread or the value read by the
1880 * first read to the same location as pread by pthread that is
1881 * sequenced after pread.
1883 * 1. If tid=pthread, then we check what other threads are reachable
1884 * through the mod order starting with pwrite. Those threads cannot
1885 * perform a write that will resolve the promise due to modification
1886 * order constraints.
1888 * 2. If the tid is not pthread, we check whether pwrite can reach the
1889 * action write through the modification order. If so, that thread
1890 * cannot perform a future write that will resolve the promise due to
1891 * modification order constraints.
1893 * @param tid The thread that either read from the model action
1894 * write, or actually did the model action write.
1896 * @param write The ModelAction representing the relevant write.
1899 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1900 void * location = write->get_location();
1901 for (unsigned int i = 0; i < promises->size(); i++) {
1902 Promise *promise = (*promises)[i];
1903 const ModelAction *act = promise->get_action();
1905 //Is this promise on the same location?
1906 if ( act->get_location() != location )
1909 //same thread as the promise
1910 if ( act->get_tid()==tid ) {
1912 //do we have a pwrite for the promise, if not, set it
1913 if (promise->get_write() == NULL ) {
1914 promise->set_write(write);
1915 //The pwrite cannot happen before the promise
1916 if (write->happens_before(act) && (write != act)) {
1917 failed_promise = true;
/* Case 1: exclude threads mo-reachable from pwrite */
1921 if (mo_graph->checkPromise(write, promise)) {
1922 failed_promise = true;
1927 //Don't do any lookups twice for the same thread
1928 if (promise->has_sync_thread(tid))
/* Case 2: this thread's write is mo-after pwrite, so it cannot fulfill */
1931 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
1932 if (promise->increment_threads(tid)) {
1933 failed_promise = true;
1941 * Compute the set of writes that may break the current pending release
1942 * sequence. This information is extracted from previous release sequence
1945 * @param curr The current ModelAction. Must be a release sequence fixup
1948 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
1950 if (pending_rel_seqs->empty())
1953 struct release_seq *pending = pending_rel_seqs->back();
/* Record each potentially-breaking write as a backtracking choice */
1954 for (unsigned int i = 0; i < pending->writes.size(); i++) {
1955 const ModelAction *write = pending->writes[i];
1956 curr->get_node()->add_relseq_break(write);
1959 /* NULL means don't break the sequence; just synchronize */
1960 curr->get_node()->add_relseq_break(NULL);
1964 * Build up an initial set of all past writes that this 'read' action may read
1965 * from. This set is determined by the clock vector's "happens before"
1967 * @param curr is the current ModelAction that we are exploring; it must be a
1970 void ModelChecker::build_reads_from_past(ModelAction *curr)
1972 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1974 ASSERT(curr->is_read());
1976 ModelAction *last_seq_cst = NULL;
1978 /* Track whether this object has been initialized */
1979 bool initialized = false;
1981 if (curr->is_seqcst()) {
1982 last_seq_cst = get_last_seq_cst(curr);
1983 /* We have to at least see the last sequentially consistent write,
1984 so we are initialized. */
1985 if (last_seq_cst != NULL)
1989 /* Iterate over all threads */
1990 for (i = 0; i < thrd_lists->size(); i++) {
1991 /* Iterate over actions in thread, starting from most recent */
1992 action_list_t *list = &(*thrd_lists)[i];
1993 action_list_t::reverse_iterator rit;
1994 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1995 ModelAction *act = *rit;
1997 /* Only consider 'write' actions */
1998 if (!act->is_write() || act == curr)
2001 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2002 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
2003 DEBUG("Adding action to may_read_from:\n");
2004 if (DBG_ENABLED()) {
/* Sleep-set reads get an extra filter (see sleep_can_read_from) */
2009 if (curr->get_sleep_flag() && ! curr->is_seqcst()) {
2010 if (sleep_can_read_from(curr, act))
2011 curr->get_node()->add_read_from(act);
2013 curr->get_node()->add_read_from(act);
2016 /* Include at most one act per-thread that "happens before" curr */
2017 if (act->happens_before(curr)) {
2025 /** @todo Need a more informative way of reporting errors. */
2026 printf("ERROR: may read from uninitialized atomic\n");
2030 if (DBG_ENABLED() || !initialized) {
2031 printf("Reached read action:\n");
2033 printf("Printing may_read_from\n");
2034 curr->get_node()->print_may_read_from();
2035 printf("End printing may_read_from\n");
/** @brief Determine whether a sleeping thread's read may take its value from
 * @a write. Walks back through the write's RMW chain looking for a release
 * store made while the reading thread was on the sleep set. */
2039 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2041 Node *prevnode=write->get_node()->get_parent();
2043 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2044 if (write->is_release()&&thread_sleep)
2046 if (!write->is_rmw()) {
/* Follow the RMW chain backwards; stop at an unresolved reads-from */
2049 if (write->get_reads_from()==NULL)
2051 write=write->get_reads_from();
/** @brief Print every action in @a list along with a rolling hash of the
 * trace, useful for comparing executions across runs. */
2055 static void print_list(action_list_t *list)
2057 action_list_t::iterator it;
2059 printf("---------------------------------------------------------------------\n");
2061 unsigned int hash=0;
2063 for (it = list->begin(); it != list->end(); it++) {
2065 hash=hash^(hash<<3)^((*it)->hash());
2067 printf("HASH %u\n", hash);
2068 printf("---------------------------------------------------------------------\n");
2071 #if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the modification order graph plus per-thread sb and rf edges
 * to "<filename>.dot" in Graphviz format.
 * NOTE(review): the matching fclose(file) is elided from this listing —
 * confirm it exists in the full source. */
2072 void ModelChecker::dumpGraph(char *filename) {
2074 sprintf(buffer, "%s.dot",filename);
2075 FILE *file=fopen(buffer, "w");
2076 fprintf(file, "digraph %s {\n",filename);
2077 mo_graph->dumpNodes(file);
/* thread_array[tid] remembers each thread's previous action for sb edges */
2078 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2080 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2081 ModelAction *action=*it;
2082 if (action->is_read()) {
2083 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2084 if (action->get_reads_from()!=NULL)
2085 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2087 if (thread_array[action->get_tid()] != NULL) {
2088 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2091 thread_array[action->get_tid()]=action;
2093 fprintf(file,"}\n");
2094 model_free(thread_array);
/** @brief Print execution statistics and the full action trace for the
 * just-completed execution. */
2099 void ModelChecker::print_summary()
2102 printf("Number of executions: %d\n", num_executions);
2103 printf("Number of feasible executions: %d\n", num_feasible_executions);
2104 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2106 #if SUPPORT_MOD_ORDER_DUMP
2108 char buffername[100];
2109 sprintf(buffername, "exec%04u", num_executions);
2110 mo_graph->dumpGraphToFile(buffername);
2111 sprintf(buffername, "graph%04u", num_executions);
2112 dumpGraph(buffername);
2115 if (!isfinalfeasible())
2116 printf("INFEASIBLE EXECUTION!\n");
2117 print_list(action_trace);
2122 * Add a Thread to the system for the first time. Should only be called once
2124 * @param t The Thread to add
2126 void ModelChecker::add_thread(Thread *t)
2128 thread_map->put(id_to_int(t->get_id()), t);
2129 scheduler->add_thread(t);
2133 * Removes a thread from the scheduler.
2134 * @param t The Thread to remove.
2136 void ModelChecker::remove_thread(Thread *t)
2138 scheduler->remove_thread(t);
2142 * @brief Get a Thread reference by its ID
2143 * @param tid The Thread's ID
2144 * @return A Thread reference
2146 Thread * ModelChecker::get_thread(thread_id_t tid) const
2148 return thread_map->get(id_to_int(tid));
2152 * @brief Get a reference to the Thread in which a ModelAction was executed
2153 * @param act The ModelAction
2154 * @return A Thread reference
2156 Thread * ModelChecker::get_thread(ModelAction *act) const
2158 return get_thread(act->get_tid());
2162 * Switch from a user-context to the "master thread" context (a.k.a. system
2163 * context). This switch is made with the intention of exploring a particular
2164 * model-checking action (described by a ModelAction object). Must be called
2165 * from a user-thread context.
2167 * @param act The current action that will be explored. May be NULL only if
2168 * trace is exiting via an assertion (see ModelChecker::set_assert and
2169 * ModelChecker::has_asserted).
2170 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2172 int ModelChecker::switch_to_master(ModelAction *act)
2175 Thread *old = thread_current();
2176 set_current_action(act);
/* Mark the user thread ready so take_step() will process its action */
2177 old->set_state(THREAD_READY);
2178 return Thread::swap(old, &system_context);
2182 * Takes the next step in the execution, if possible.
2183 * @return Returns true (success) if a step was taken and false otherwise.
2185 bool ModelChecker::take_step() {
2189 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
/* NOTE(review): a guard for curr == NULL appears to be elided before this
 * dereference — confirm against the full source. */
2191 if (curr->get_state() == THREAD_READY) {
2192 ASSERT(priv->current_action);
2194 priv->nextThread = check_current_action(priv->current_action);
2195 priv->current_action = NULL;
2197 if (curr->is_blocked() || curr->is_complete())
2198 scheduler->remove_thread(curr);
2203 Thread *next = scheduler->next_thread(priv->nextThread);
2205 /* Infeasible -> don't take any more steps */
/* Optional step bound: give up on executions that run too long */
2209 if (params.bound != 0) {
2210 if (priv->used_sequence_numbers > params.bound) {
2215 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2216 next ? id_to_int(next->get_id()) : -1);
2219 * Launch end-of-execution release sequence fixups only when there are:
2221 * (1) no more user threads to run (or when execution replay chooses
2222 * the 'model_thread')
2223 * (2) pending release sequences
2224 * (3) pending assertions (i.e., data races)
2225 * (4) no pending promises
2227 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2228 isfinalfeasible() && !unrealizedraces.empty()) {
2229 printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2230 pending_rel_seqs->size());
2231 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2232 std::memory_order_seq_cst, NULL, VALUE_NONE,
2234 set_current_action(fixup);
2238 /* next == NULL -> don't take any more steps */
2242 next->set_state(THREAD_RUNNING);
2244 if (next->get_pending() != NULL) {
2245 /* restart a pending action */
2246 set_current_action(next->get_pending());
2247 next->set_pending(NULL);
2248 next->set_state(THREAD_READY);
2252 /* Return false only if swap fails with an error */
2253 return (Thread::swap(&system_context, next) == 0);
2256 /** Runs the current execution until there are no more steps to take. */
2257 void ModelChecker::finish_execution() {
2260 while (take_step());