9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
24 struct model_snapshot_members {
25 ModelAction *current_action; // action handed over from the user context (see set_current_action)
26 unsigned int next_thread_id; // next id to assign; also the count of threads so far (see get_num_threads)
27 modelclock_t used_sequence_numbers; // last sequence number handed out (see get_next_seq_num)
29 ModelAction *next_backtrack; // cached latest backtracking point, or NULL if none
32 /** @brief Constructor */
33 ModelChecker::ModelChecker(struct model_params params) :
34 /* Initialize default scheduler */
36 scheduler(new Scheduler()),
38 num_feasible_executions(0),
40 earliest_diverge(NULL),
41 action_trace(new action_list_t()),
42 thread_map(new HashTable<int, Thread *, int>()),
43 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
44 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
45 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
46 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
/* The following vectors use SnapshotAlloc so their contents roll back with the snapshot */
47 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51 node_stack(new NodeStack()),
52 mo_graph(new CycleGraph()),
53 failed_promise(false),
54 too_many_reads(false),
56 bad_synchronization(false)
58 /* Allocate this "size" on the snapshotting heap */
59 priv = (struct model_snapshot_members *)snapshot_calloc(1, sizeof(*priv));
60 /* First thread created will have id INITIAL_THREAD_ID */
61 priv->next_thread_id = INITIAL_THREAD_ID;
63 /* Initialize a model-checker thread, for special ModelActions */
64 model_thread = new Thread(get_next_id());
65 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
68 /** @brief Destructor */
69 ModelChecker::~ModelChecker()
/* Threads are owned by thread_map; delete each before (presumably) freeing the map itself */
71 for (unsigned int i = 0; i < get_num_threads(); i++)
72 delete thread_map->get(i);
77 delete lock_waiters_map;
78 delete condvar_waiters_map;
/* Promise objects are heap-allocated in process_read/check_recency paths; free them here */
81 for (unsigned int i = 0; i < promises->size(); i++)
82 delete (*promises)[i];
85 delete pending_rel_seqs;
87 delete thrd_last_action;
/**
 * Look up the action list for @a ptr in @a hash, lazily allocating (and,
 * presumably, inserting) an empty list on first use so callers always get a
 * non-NULL list. NOTE(review): the NULL check and hash->put appear to be in
 * source lines not visible here — confirm against the full file.
 */
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
96 action_list_t * tmp=hash->get(ptr);
98 tmp=new action_list_t();
/**
 * Same lazy-initialization pattern as get_safe_ptr_action, but for the
 * per-object vector of per-thread action lists (obj_thrd_map).
 */
104 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
105 std::vector<action_list_t> * tmp=hash->get(ptr);
107 tmp=new std::vector<action_list_t>();
114 * Restores user program to initial state and resets all model-checker data
117 void ModelChecker::reset_to_initial_state()
119 DEBUG("+++ Resetting to initial state +++\n");
120 node_stack->reset_execution();
/* Clear per-execution infeasibility flags so the next execution starts clean */
121 failed_promise = false;
122 too_many_reads = false;
123 bad_synchronization = false;
/* Roll the snapshotted heap/state back to before the first step */
125 snapshotObject->backTrackBeforeStep(0);
128 /** @return a thread ID for a new Thread */
129 thread_id_t ModelChecker::get_next_id()
131 return priv->next_thread_id++;
134 /** @return the number of user threads created during this execution */
135 unsigned int ModelChecker::get_num_threads() const
/* next_thread_id doubles as the thread count, since ids are assigned densely from 0 */
137 return priv->next_thread_id;
140 /** @return The currently executing Thread. */
141 Thread * ModelChecker::get_current_thread()
143 return scheduler->get_current_thread();
146 /** @return a sequence number for a new ModelAction */
147 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: sequence numbers start at 1; 0 is never handed out */
149 return ++priv->used_sequence_numbers;
/** @return the Node at the head of the NodeStack (the current exploration point) */
152 Node * ModelChecker::get_curr_node() {
153 return node_stack->get_head();
157 * @brief Choose the next thread to execute.
159 * This function chooses the next thread that should execute. It can force the
160 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
161 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
162 * The model-checker may have no preference regarding the next thread (i.e.,
163 * when exploring a new execution ordering), in which case this will return
165 * @param curr The current ModelAction. This action might guide the choice of
167 * @return The next thread to run. If the model-checker has no preference, NULL.
169 Thread * ModelChecker::get_next_thread(ModelAction *curr)
174 /* Do not split atomic actions. */
176 return thread_current();
177 /* The THREAD_CREATE action points to the created Thread */
178 else if (curr->get_type() == THREAD_CREATE)
179 return (Thread *)curr->get_location();
182 /* Have we completed exploring the preselected path? */
186 /* Else, we are trying to replay an execution */
187 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence point seen since the last feasible execution */
189 if (next == diverge) {
190 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
191 earliest_diverge=diverge;
193 Node *nextnode = next->get_node();
194 Node *prevnode = nextnode->get_parent();
195 scheduler->update_sleep_set(prevnode);
197 /* Reached divergence point */
/* Try the alternatives at this node in priority order: misc index, promise
 * set, read-from, future value, release-seq break. Whichever succeeds, the
 * same thread re-executes with the new choice; pop the stack below the node. */
198 if (nextnode->increment_misc()) {
199 /* The next node will try to satisfy a different misc_index values. */
200 tid = next->get_tid();
201 node_stack->pop_restofstack(2);
202 } else if (nextnode->increment_promise()) {
203 /* The next node will try to satisfy a different set of promises. */
204 tid = next->get_tid();
205 node_stack->pop_restofstack(2);
206 } else if (nextnode->increment_read_from()) {
207 /* The next node will read from a different value. */
208 tid = next->get_tid();
209 node_stack->pop_restofstack(2);
210 } else if (nextnode->increment_future_value()) {
211 /* The next node will try to read from a different future value. */
212 tid = next->get_tid();
213 node_stack->pop_restofstack(2);
214 } else if (nextnode->increment_relseq_break()) {
215 /* The next node will try to resolve a release sequence differently */
216 tid = next->get_tid();
217 node_stack->pop_restofstack(2);
219 /* Make a different thread execute for next step */
220 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
221 tid = prevnode->get_next_backtrack();
222 /* Make sure the backtracked thread isn't sleeping. */
223 node_stack->pop_restofstack(1);
224 if (diverge==earliest_diverge) {
225 earliest_diverge=prevnode->get_action();
228 /* The correct sleep set is in the parent node. */
231 DEBUG("*** Divergence point ***\n");
235 tid = next->get_tid();
237 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
238 ASSERT(tid != THREAD_ID_T_NONE);
239 return thread_map->get(id_to_int(tid));
243 * We need to know what the next actions of all threads in the sleep
244 * set will be. This method computes them and stores the actions at
245 * the corresponding thread object's pending action.
248 void ModelChecker::execute_sleep_set() {
249 for(unsigned int i=0;i<get_num_threads();i++) {
250 thread_id_t tid=int_to_id(i);
251 Thread *thr=get_thread(tid);
/* Only compute a pending action once per sleeping thread */
252 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
253 thr->get_pending() == NULL ) {
254 thr->set_state(THREAD_RUNNING);
255 scheduler->next_thread(thr);
/* Run the sleeping thread one step so it deposits its next action in
 * priv->current_action, then record that action as pending */
256 Thread::swap(&system_context, thr);
257 priv->current_action->set_sleep_flag();
258 thr->set_pending(priv->current_action);
/* Clear so a stale action is never mistaken for a fresh one */
261 priv->current_action = NULL;
/**
 * Wake any sleeping thread whose pending action could synchronize with
 * @a curr (such a thread's next step is no longer redundant).
 * RMW-read halves are skipped — presumably the complete RMW is handled when
 * its write half executes; confirm against the full file.
 */
264 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
265 for(unsigned int i=0;i<get_num_threads();i++) {
266 thread_id_t tid=int_to_id(i);
267 Thread *thr=get_thread(tid);
268 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
269 ModelAction *pending_act=thr->get_pending();
270 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
271 //Remove this thread from sleep set
272 scheduler->remove_sleep(thr);
279 * Check if we are in a deadlock. Should only be called at the end of an
280 * execution, although it should not give false positives in the middle of an
281 * execution (there should be some ENABLED thread).
283 * @return True if program is in a deadlock; false otherwise
285 bool ModelChecker::is_deadlocked() const
287 bool blocking_threads = false;
288 for (unsigned int i = 0; i < get_num_threads(); i++) {
289 thread_id_t tid = int_to_id(i);
292 Thread *t = get_thread(tid);
/* A user thread with a pending action is blocked waiting on something */
293 if (!t->is_model_thread() && t->get_pending())
294 blocking_threads = true;
296 return blocking_threads;
300 * Check if this is a complete execution. That is, have all thread completed
301 * execution (rather than exiting because sleep sets have forced a redundant
304 * @return True if the execution is complete.
306 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution was cut short */
308 for (unsigned int i = 0; i < get_num_threads(); i++)
309 if (is_enabled(int_to_id(i)))
315 * Queries the model-checker for more executions to explore and, if one
316 * exists, resets the model-checker state to execute a new execution.
318 * @return If there are more executions to explore, return true. Otherwise,
321 bool ModelChecker::next_execution()
328 printf("ERROR: DEADLOCK\n");
329 if (isfinalfeasible()) {
330 printf("Earliest divergence point since last feasible execution:\n");
331 if (earliest_diverge)
332 earliest_diverge->print();
334 printf("(Not set)\n");
/* Reset the divergence tracker for the next feasible-execution window */
336 earliest_diverge = NULL;
337 num_feasible_executions++;
340 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
341 pending_rel_seqs->size());
344 if (isfinalfeasible() || DBG_ENABLED()) {
/* No remaining backtracking point => state space fully explored */
349 if ((diverge = get_next_backtrack()) == NULL)
353 printf("Next execution will diverge at:\n");
357 reset_to_initial_state();
/**
 * Find the most recent action that conflicts with @a act (i.e., the action
 * against which @a act could be reordered). The definition of "conflict"
 * depends on the action type: synchronization partners for atomics, lock
 * conflicts for trylock, failed trylocks for unlock, and wait/notify pairs
 * for condition variables. Searches the per-location action list newest-first.
 */
361 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
363 switch (act->get_type()) {
367 /* linear search: from most recent to oldest */
368 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
369 action_list_t::reverse_iterator rit;
370 for (rit = list->rbegin(); rit != list->rend(); rit++) {
371 ModelAction *prev = *rit;
372 if (prev->could_synchronize_with(act))
378 case ATOMIC_TRYLOCK: {
379 /* linear search: from most recent to oldest */
380 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
381 action_list_t::reverse_iterator rit;
382 for (rit = list->rbegin(); rit != list->rend(); rit++) {
383 ModelAction *prev = *rit;
384 if (act->is_conflicting_lock(prev))
389 case ATOMIC_UNLOCK: {
390 /* linear search: from most recent to oldest */
391 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
392 action_list_t::reverse_iterator rit;
393 for (rit = list->rbegin(); rit != list->rend(); rit++) {
394 ModelAction *prev = *rit;
/* An unlock conflicts with another thread's failed trylock */
395 if (!act->same_thread(prev)&&prev->is_failed_trylock())
401 /* linear search: from most recent to oldest */
402 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
403 action_list_t::reverse_iterator rit;
404 for (rit = list->rbegin(); rit != list->rend(); rit++) {
405 ModelAction *prev = *rit;
406 if (!act->same_thread(prev)&&prev->is_failed_trylock())
408 if (!act->same_thread(prev)&&prev->is_notify())
414 case ATOMIC_NOTIFY_ALL:
415 case ATOMIC_NOTIFY_ONE: {
416 /* linear search: from most recent to oldest */
417 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
418 action_list_t::reverse_iterator rit;
419 for (rit = list->rbegin(); rit != list->rend(); rit++) {
420 ModelAction *prev = *rit;
/* A notify conflicts with another thread's wait */
421 if (!act->same_thread(prev)&&prev->is_wait())
432 /** This method finds backtracking points where we should try to
433 * reorder the parameter ModelAction against.
435 * @param act The ModelAction to find backtracking points for.
437 void ModelChecker::set_backtracking(ModelAction *act)
439 Thread *t = get_thread(act);
440 ModelAction * prev = get_last_conflict(act);
444 Node * node = prev->get_node()->get_parent();
446 int low_tid, high_tid;
/* If act's thread was enabled at the conflict point, only that thread needs
 * a backtracking mark; otherwise consider every thread */
447 if (node->is_enabled(t)) {
448 low_tid = id_to_int(act->get_tid());
449 high_tid = low_tid+1;
452 high_tid = get_num_threads();
455 for(int i = low_tid; i < high_tid; i++) {
456 thread_id_t tid = int_to_id(i);
458 /* Make sure this thread can be enabled here. */
459 if (i >= node->get_num_threads())
462 /* Don't backtrack into a point where the thread is disabled or sleeping. */
463 if (node->enabled_status(tid)!=THREAD_ENABLED)
466 /* Check if this has been explored already */
467 if (node->has_been_explored(tid))
470 /* See if fairness allows */
471 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
473 for(int t=0;t<node->get_num_threads();t++) {
474 thread_id_t tother=int_to_id(t);
475 if (node->is_enabled(tother) && node->has_priority(tother)) {
483 /* Cache the latest backtracking point */
484 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
485 priv->next_backtrack = prev;
487 /* If this is a new backtracking point, mark the tree */
488 if (!node->set_backtrack(tid))
490 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
491 id_to_int(prev->get_tid()),
492 id_to_int(t->get_id()));
501 * Returns last backtracking point. The model checker will explore a different
502 * path for this point in the next execution.
503 * @return The ModelAction at which the next execution should diverge.
505 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: it is cleared so it is only returned once */
507 ModelAction *next = priv->next_backtrack;
508 priv->next_backtrack = NULL;
513 * Processes a read or rmw model action.
514 * @param curr is the read model action to process.
515 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
516 * @return True if processing this read updates the mo_graph.
518 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
520 uint64_t value = VALUE_NONE;
521 bool updated = false;
523 const ModelAction *reads_from = curr->get_node()->get_read_from();
524 if (reads_from != NULL) {
/* mo_graph edits are speculative until commitChanges/rollbackChanges */
525 mo_graph->startChanges();
527 value = reads_from->get_value();
528 bool r_status = false;
/* For the second half of an RMW, recency/mo were handled with the first half */
530 if (!second_part_of_rmw) {
531 check_recency(curr, reads_from);
532 r_status = r_modification_order(curr, reads_from);
/* Infeasible choice: roll back and retry with a different read-from/future value */
536 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
537 mo_graph->rollbackChanges();
538 too_many_reads = false;
542 curr->read_from(reads_from);
543 mo_graph->commitChanges();
544 mo_check_promises(curr->get_tid(), reads_from);
547 } else if (!second_part_of_rmw) {
548 /* Read from future value */
549 value = curr->get_node()->get_future_value();
550 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
/* NULL reads-from marks this as a promised (not yet written) value */
551 curr->read_from(NULL);
552 Promise *valuepromise = new Promise(curr, value, expiration);
553 promises->push_back(valuepromise);
555 get_thread(curr)->set_return_value(value);
561 * Processes a lock, trylock, or unlock model action. @param curr is
562 * the read model action to process.
564 * The try lock operation checks whether the lock is taken. If not,
565 * it falls to the normal lock operation case. If so, it returns
568 * The lock operation has already been checked that it is enabled, so
569 * it just grabs the lock and synchronizes with the previous unlock.
571 * The unlock operation has to re-enable all of the threads that are
572 * waiting on the lock.
574 * @return True if synchronization was updated; false otherwise
576 bool ModelChecker::process_mutex(ModelAction *curr) {
577 std::mutex *mutex=NULL;
578 struct std::mutex_state *state=NULL;
/* For lock/trylock/unlock the mutex is the action's location; for wait it is
 * passed in the action's value */
580 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
581 mutex = (std::mutex *)curr->get_location();
582 state = mutex->get_state();
583 } else if(curr->is_wait()) {
584 mutex = (std::mutex *)curr->get_value();
585 state = mutex->get_state();
588 switch (curr->get_type()) {
589 case ATOMIC_TRYLOCK: {
590 bool success = !state->islocked;
591 curr->set_try_lock(success);
593 get_thread(curr)->set_return_value(0);
596 get_thread(curr)->set_return_value(1);
598 //otherwise fall into the lock case
/* Detect use of the mutex before its construction was visible to this thread */
600 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
601 printf("Lock access before initialization\n");
604 state->islocked = true;
605 ModelAction *unlock = get_last_unlock(curr);
606 //synchronize with the previous unlock statement
607 if (unlock != NULL) {
608 curr->synchronize_with(unlock);
613 case ATOMIC_UNLOCK: {
615 state->islocked = false;
616 //wake up the other threads
617 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
618 //activate all the waiting threads
619 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
620 scheduler->wake(get_thread(*rit));
/* Wait: release the mutex, wake lock-waiters, then maybe sleep on the condvar */
627 state->islocked = false;
628 //wake up the other threads
629 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
630 //activate all the waiting threads
631 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
632 scheduler->wake(get_thread(*rit));
635 //check whether we should go to sleep or not...simulate spurious failures
636 if (curr->get_node()->get_misc()==0) {
637 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
639 scheduler->sleep(get_current_thread());
643 case ATOMIC_NOTIFY_ALL: {
644 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
645 //activate all the waiting threads
646 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
647 scheduler->wake(get_thread(*rit));
652 case ATOMIC_NOTIFY_ONE: {
653 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The misc index chooses *which* waiter wakes, so all choices get explored */
654 int wakeupthread=curr->get_node()->get_misc();
655 action_list_t::iterator it = waiters->begin();
656 advance(it, wakeupthread);
657 scheduler->wake(get_thread(*it));
669 * Process a write ModelAction
670 * @param curr The ModelAction to process
671 * @return True if the mo_graph was updated or promises were resolved
673 bool ModelChecker::process_write(ModelAction *curr)
675 bool updated_mod_order = w_modification_order(curr);
676 bool updated_promises = resolve_promises(curr);
/* Only revisit deferred future values once no promises remain outstanding */
678 if (promises->size() == 0) {
679 for (unsigned int i = 0; i < futurevalues->size(); i++) {
680 struct PendingFutureValue pfv = (*futurevalues)[i];
681 //Do more ambitious checks now that mo is more complete
682 if (mo_may_allow(pfv.writer, pfv.act)&&
683 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
684 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
685 priv->next_backtrack = pfv.act;
687 futurevalues->resize(0);
690 mo_graph->commitChanges();
691 mo_check_promises(curr->get_tid(), curr);
693 get_thread(curr)->set_return_value(VALUE_NONE);
694 return updated_mod_order || updated_promises;
698 * @brief Process the current action for thread-related activity
700 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
701 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
702 * synchronization, etc. This function is a no-op for non-THREAD actions
703 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
705 * @param curr The current action
706 * @return True if synchronization was updated or a thread completed
708 bool ModelChecker::process_thread_action(ModelAction *curr)
710 bool updated = false;
712 switch (curr->get_type()) {
713 case THREAD_CREATE: {
714 Thread *th = (Thread *)curr->get_location();
715 th->set_creation(curr);
/* THREAD_JOIN: synchronize with the finished thread's last action */
719 Thread *blocking = (Thread *)curr->get_location();
720 ModelAction *act = get_last_action(blocking->get_id());
721 curr->synchronize_with(act);
722 updated = true; /* trigger rel-seq checks */
725 case THREAD_FINISH: {
726 Thread *th = get_thread(curr);
/* Wake every thread blocked in a join on this thread */
727 while (!th->wait_list_empty()) {
728 ModelAction *act = th->pop_wait_list();
729 scheduler->wake(get_thread(act));
732 updated = true; /* trigger rel-seq checks */
736 check_promises(curr->get_tid(), NULL, curr->get_cv());
747 * @brief Process the current action for release sequence fixup activity
749 * Performs model-checker release sequence fixups for the current action,
750 * forcing a single pending release sequence to break (with a given, potential
751 * "loose" write) or to complete (i.e., synchronize). If a pending release
752 * sequence forms a complete release sequence, then we must perform the fixup
753 * synchronization, mo_graph additions, etc.
755 * @param curr The current action; must be a release sequence fixup action
756 * @param work_queue The work queue to which to add work items as they are
759 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* A NULL/absent break-write presumably means "complete" rather than "break";
 * the branch between the two paths is in lines not visible here */
761 const ModelAction *write = curr->get_node()->get_relseq_break();
762 struct release_seq *sequence = pending_rel_seqs->back();
763 pending_rel_seqs->pop_back();
765 ModelAction *acquire = sequence->acquire;
766 const ModelAction *rf = sequence->rf;
767 const ModelAction *release = sequence->release;
771 ASSERT(release->same_thread(rf));
775 * @todo Forcing a synchronization requires that we set
776 * modification order constraints. For instance, we can't allow
777 * a fixup sequence in which two separate read-acquire
778 * operations read from the same sequence, where the first one
779 * synchronizes and the other doesn't. Essentially, we can't
780 * allow any writes to insert themselves between 'release' and
784 /* Must synchronize */
785 if (!acquire->synchronize_with(release)) {
786 set_bad_synchronization();
789 /* Re-check all pending release sequences */
790 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
791 /* Re-check act for mo_graph edges */
792 work_queue->push_back(MOEdgeWorkEntry(acquire));
794 /* propagate synchronization to later actions */
795 action_list_t::reverse_iterator rit = action_trace->rbegin();
796 for (; (*rit) != acquire; rit++) {
797 ModelAction *propagate = *rit;
798 if (acquire->happens_before(propagate)) {
799 propagate->synchronize_with(acquire);
800 /* Re-check 'propagate' for mo_graph edges */
801 work_queue->push_back(MOEdgeWorkEntry(propagate));
805 /* Break release sequence with new edges:
806 * release --mo--> write --mo--> rf */
807 mo_graph->addEdge(release, write);
808 mo_graph->addEdge(write, rf);
811 /* See if we have realized a data race */
812 if (checkDataRaces())
817 * Initialize the current action by performing one or more of the following
818 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
819 * in the NodeStack, manipulating backtracking sets, allocating and
820 * initializing clock vectors, and computing the promises to fulfill.
822 * @param curr The current action, as passed from the user context; may be
823 * freed/invalidated after the execution of this function, with a different
824 * action "returned" its place (pass-by-reference)
825 * @return True if curr is a newly-explored action; false otherwise
827 bool ModelChecker::initialize_curr_action(ModelAction **curr)
829 ModelAction *newcurr;
/* RMWC/RMW: merge into the earlier RMWR action rather than exploring anew */
831 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
832 newcurr = process_rmw(*curr);
835 if (newcurr->is_rmw())
836 compute_promises(newcurr);
842 (*curr)->set_seq_number(get_next_seq_num());
844 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
846 /* First restore type and order in case of RMW operation */
847 if ((*curr)->is_rmwr())
848 newcurr->copy_typeandorder(*curr);
850 ASSERT((*curr)->get_location() == newcurr->get_location());
851 newcurr->copy_from_new(*curr);
853 /* Discard duplicate ModelAction; use action from NodeStack */
856 /* Always compute new clock vector */
857 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
860 return false; /* Action was explored previously */
864 /* Always compute new clock vector */
865 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
867 * Perform one-time actions when pushing new ModelAction onto
870 if (newcurr->is_write())
871 compute_promises(newcurr);
872 else if (newcurr->is_relseq_fixup())
873 compute_relseq_breakwrites(newcurr);
874 else if (newcurr->is_wait())
875 newcurr->get_node()->set_misc_max(2);
876 else if (newcurr->is_notify_one()) {
/* One misc value per condvar waiter: each choice wakes a different thread */
877 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
879 return true; /* This was a new ModelAction */
884 * @brief Check whether a model action is enabled.
886 * Checks whether a lock or join operation would be successful (i.e., is the
887 * lock already locked, or is the joined thread already complete). If not, put
888 * the action in a waiter list.
890 * @param curr is the ModelAction to check whether it is enabled.
891 * @return a bool that indicates whether the action is enabled.
893 bool ModelChecker::check_action_enabled(ModelAction *curr) {
894 if (curr->is_lock()) {
895 std::mutex * lock = (std::mutex *)curr->get_location();
896 struct std::mutex_state * state = lock->get_state();
897 if (state->islocked) {
898 //Stick the action in the appropriate waiting queue
899 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
902 } else if (curr->get_type() == THREAD_JOIN) {
903 Thread *blocking = (Thread *)curr->get_location();
904 if (!blocking->is_complete()) {
/* The joined thread will wake us from its THREAD_FINISH processing */
905 blocking->push_wait_list(curr);
914 * Stores the ModelAction for the current thread action. Call this
915 * immediately before switching from user- to system-context to pass
917 * @param act The ModelAction created by the user-thread action
919 void ModelChecker::set_current_action(ModelAction *act) {
920 priv->current_action = act;
924 * This is the heart of the model checker routine. It performs model-checking
925 * actions corresponding to a given "current action." Among other processes, it
926 * calculates reads-from relationships, updates synchronization clock vectors,
927 * forms a memory_order constraints graph, and handles replay/backtrack
928 * execution when running permutations of previously-observed executions.
930 * @param curr The current action to process
931 * @return The next Thread that must be executed. May be NULL if ModelChecker
932 * makes no choice (e.g., according to replay execution, combining RMW actions,
935 Thread * ModelChecker::check_current_action(ModelAction *curr)
938 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
940 if (!check_action_enabled(curr)) {
941 /* Make the execution look like we chose to run this action
942 * much later, when a lock/join can succeed */
943 get_current_thread()->set_pending(curr);
944 scheduler->sleep(get_current_thread());
945 return get_next_thread(NULL);
/* May replace curr with the equivalent action from the NodeStack */
948 bool newly_explored = initialize_curr_action(&curr);
950 wake_up_sleeping_actions(curr);
952 /* Add the action to lists before any other model-checking tasks */
953 if (!second_part_of_rmw)
954 add_action_to_lists(curr);
956 /* Build may_read_from set for newly-created actions */
957 if (newly_explored && curr->is_read())
958 build_reads_from_past(curr);
960 /* Initialize work_queue with the "current action" work */
961 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Process work items until a fixed point: each item may enqueue more work */
962 while (!work_queue.empty()) {
963 WorkQueueEntry work = work_queue.front();
964 work_queue.pop_front();
967 case WORK_CHECK_CURR_ACTION: {
968 ModelAction *act = work.action;
969 bool update = false; /* update this location's release seq's */
970 bool update_all = false; /* update all release seq's */
972 if (process_thread_action(curr))
975 if (act->is_read() && process_read(act, second_part_of_rmw))
978 if (act->is_write() && process_write(act))
981 if (act->is_mutex_op() && process_mutex(act))
984 if (act->is_relseq_fixup())
985 process_relseq_fixup(curr, &work_queue);
/* NULL location => re-check every pending release sequence */
988 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
990 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
993 case WORK_CHECK_RELEASE_SEQ:
994 resolve_release_sequences(work.location, &work_queue);
996 case WORK_CHECK_MO_EDGES: {
997 /** @todo Complete verification of work_queue */
998 ModelAction *act = work.action;
999 bool updated = false;
1001 if (act->is_read()) {
1002 const ModelAction *rf = act->get_reads_from();
1003 if (rf != NULL && r_modification_order(act, rf))
1006 if (act->is_write()) {
1007 if (w_modification_order(act))
1010 mo_graph->commitChanges();
1013 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1022 check_curr_backtracking(curr);
1023 set_backtracking(curr);
1024 return get_next_thread(curr);
/**
 * If the current action (or its parent node) still has unexplored
 * alternatives — backtrack points, misc values, read-from choices, future
 * values, promises, or release-seq breaks — and it is later than the cached
 * backtracking point, make it the next backtracking point.
 */
1027 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1028 Node *currnode = curr->get_node();
1029 Node *parnode = currnode->get_parent();
1031 if ((!parnode->backtrack_empty() ||
1032 !currnode->misc_empty() ||
1033 !currnode->read_from_empty() ||
1034 !currnode->future_value_empty() ||
1035 !currnode->promise_empty() ||
1036 !currnode->relseq_break_empty())
1037 && (!priv->next_backtrack ||
1038 *curr > *priv->next_backtrack)) {
1039 priv->next_backtrack = curr;
/** @return true if any outstanding promise's expiration clock has passed */
1043 bool ModelChecker::promises_expired() const
1045 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1046 Promise *promise = (*promises)[promise_index];
1047 if (promise->get_expiration()<priv->used_sequence_numbers) {
1054 /** @return whether the current partial trace must be a prefix of a
1055 * feasible trace. */
1056 bool ModelChecker::isfeasibleprefix() const
/* Feasible-prefix = feasible AND no unresolved promises or release sequences */
1058 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1061 /** @return whether the current partial trace is feasible. */
1062 bool ModelChecker::isfeasible() const
1064 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1065 DEBUG("Infeasible: RMW violation\n");
1067 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1070 /** @return whether the current partial trace is feasible other than
1071 * multiple RMW reading from the same store. */
1072 bool ModelChecker::isfeasibleotherthanRMW() const
/* Debug branch only reports *why* the trace is infeasible; result is below */
1074 if (DBG_ENABLED()) {
1075 if (mo_graph->checkForCycles())
1076 DEBUG("Infeasible: modification order cycles\n");
1078 DEBUG("Infeasible: failed promise\n");
1080 DEBUG("Infeasible: too many reads\n");
1081 if (bad_synchronization)
1082 DEBUG("Infeasible: bad synchronization ordering\n");
1083 if (promises_expired())
1084 DEBUG("Infeasible: promises expired\n");
1086 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1089 /** Returns whether the current completed trace is feasible. */
1090 bool ModelChecker::isfinalfeasible() const
1092 if (DBG_ENABLED() && promises->size() != 0)
1093 DEBUG("Infeasible: unrevolved promises\n");
/* A completed trace is feasible only if every promise was resolved */
1095 return isfeasible() && promises->size() == 0;
1098 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1099 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1100 ModelAction *lastread = get_last_action(act->get_tid());
1101 lastread->process_rmw(act);
/* A completed RMW must immediately follow its reads-from in modification order */
1102 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1103 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1104 mo_graph->commitChanges();
1110 * Checks whether a thread has read from the same write for too many times
1111 * without seeing the effects of a later write.
1114 * 1) there must a different write that we could read from that would satisfy the modification order,
1115 * 2) we must have read from the same value in excess of maxreads times, and
1116 * 3) that other write must have been in the reads_from set for maxreads times.
1118 * If so, we decide that the execution is no longer feasible.
1120 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
1121 if (params.maxreads != 0) {
/* With <= 1 read-from candidates there is no "different write" to prefer */
1123 if (curr->get_node()->get_read_from_size() <= 1)
1125 //Must make sure that execution is currently feasible... We could
1126 //accidentally clear by rolling back
1129 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1130 int tid = id_to_int(curr->get_tid());
1133 if ((int)thrd_lists->size() <= tid)
1135 action_list_t *list = &(*thrd_lists)[tid];
1137 action_list_t::reverse_iterator rit = list->rbegin();
1138 /* Skip past curr */
1139 for (; (*rit) != curr; rit++)
1141 /* go past curr now */
1144 action_list_t::reverse_iterator ritcopy = rit;
1145 //See if we have enough reads from the same value
1147 for (; count < params.maxreads; rit++,count++) {
1148 if (rit==list->rend())
1150 ModelAction *act = *rit;
1151 if (!act->is_read())
1154 if (act->get_reads_from() != rf)
1156 if (act->get_node()->get_read_from_size() <= 1)
/* Look for an alternative write that would also be feasible to read from */
1159 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1161 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1163 //Need a different write
1167 /* Test to see whether this is a feasible write to read from*/
1168 mo_graph->startChanges();
1169 r_modification_order(curr, write);
1170 bool feasiblereadfrom = isfeasible();
/* Speculative edges only — always roll back after the feasibility probe */
1171 mo_graph->rollbackChanges();
1173 if (!feasiblereadfrom)
1177 bool feasiblewrite = true;
1178 //now we need to see if this write works for everyone
1180 for (int loop = count; loop>0; loop--,rit++) {
1181 ModelAction *act=*rit;
1182 bool foundvalue = false;
1183 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1184 if (act->get_node()->get_read_from_at(j)==write) {
1190 feasiblewrite = false;
1194 if (feasiblewrite) {
1195 too_many_reads = true;
1203 * Updates the mo_graph with the constraints imposed from the current
1206 * Basic idea is the following: Go through each other thread and find
1207 * the latest action that happened before our read. Two cases:
1209 * (1) The action is a write => that write must either occur before
1210 * the write we read from or be the write we read from.
1212 * (2) The action is a read => the write that that action read from
1213 * must occur before the write we read from or be the same write.
1215 * @param curr The current action. Must be a read.
1216 * @param rf The action that curr reads from. Must be a write.
1217 * @return True if modification order edges were added; false otherwise
1219 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1221 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1224 ASSERT(curr->is_read());
1226 /* Iterate over all threads */
1227 for (i = 0; i < thrd_lists->size(); i++) {
1228 /* Iterate over actions in thread, starting from most recent */
1229 action_list_t *list = &(*thrd_lists)[i];
1230 action_list_t::reverse_iterator rit;
1231 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1232 ModelAction *act = *rit;
1235 * Include at most one act per-thread that "happens
1236 * before" curr. Don't consider reflexively.
1238 if (act->happens_before(curr) && act != curr) {
1239 if (act->is_write()) {
// Case (1): earlier hb write must be mo-before the write we read from.
1241 mo_graph->addEdge(act, rf);
// Case (2): earlier hb read -- constrain the write it read from.
1245 const ModelAction *prevreadfrom = act->get_reads_from();
1246 //if the previous read is unresolved, keep going...
1247 if (prevreadfrom == NULL)
1250 if (rf != prevreadfrom) {
1251 mo_graph->addEdge(prevreadfrom, rf);
1263 /** This method fixes up the modification order when we resolve a
1264 * promise. The basic problem is that actions that occur after the
1265 * read curr could not properly add items to the modification order
1268 * So for each thread, we find the earliest item that happens after
1269 * the read curr. This is the item we have to fix up with additional
1270 * constraints. If that action is write, we add a MO edge between
1271 * the Action rf and that action. If the action is a read, we add a
1272 * MO edge between the Action rf, and whatever the read accessed.
1274 * @param curr is the read ModelAction that we are fixing up MO edges for.
1275 * @param rf is the write ModelAction that curr reads from.
1278 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1280 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1282 ASSERT(curr->is_read());
1284 /* Iterate over all threads */
1285 for (i = 0; i < thrd_lists->size(); i++) {
1286 /* Iterate over actions in thread, starting from most recent */
1287 action_list_t *list = &(*thrd_lists)[i];
1288 action_list_t::reverse_iterator rit;
1289 ModelAction *lastact = NULL;
1291 /* Find last action that happens after curr that is either not curr or a rmw */
1292 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1293 ModelAction *act = *rit;
1294 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1300 /* Include at most one act per-thread that "happens before" curr */
1301 if (lastact != NULL) {
1302 if (lastact==curr) {
1303 //Case 1: The resolved read is a RMW, and we need to make sure
1304 //that the write portion of the RMW mod order after rf
1306 mo_graph->addEdge(rf, lastact);
1307 } else if (lastact->is_read()) {
1308 //Case 2: The resolved read is a normal read and the next
1309 //operation is a read, and we need to make sure the value read
1310 //is mod ordered after rf
1312 const ModelAction *postreadfrom = lastact->get_reads_from();
// Skip if the later read is itself unresolved or reads the same write.
1313 if (postreadfrom != NULL&&rf != postreadfrom)
1314 mo_graph->addEdge(rf, postreadfrom);
1316 //Case 3: The resolved read is a normal read and the next
1317 //operation is a write, and we need to make sure that the
1318 //write is mod ordered after rf
1320 mo_graph->addEdge(rf, lastact);
1328 * Updates the mo_graph with the constraints imposed from the current write.
1330 * Basic idea is the following: Go through each other thread and find
1331 * the latest action that happened before our write. Two cases:
1333 * (1) The action is a write => that write must occur before
1336 * (2) The action is a read => the write that that action read from
1337 * must occur before the current write.
1339 * This method also handles two other issues:
1341 * (I) Sequential Consistency: Making sure that if the current write is
1342 * seq_cst, that it occurs after the previous seq_cst write.
1344 * (II) Sending the write back to non-synchronizing reads.
1346 * @param curr The current action. Must be a write.
1347 * @return True if modification order edges were added; false otherwise
1349 bool ModelChecker::w_modification_order(ModelAction *curr)
1351 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1354 ASSERT(curr->is_write());
1356 if (curr->is_seqcst()) {
1357 /* We have to at least see the last sequentially consistent write,
1358 so we are initialized. */
1359 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1360 if (last_seq_cst != NULL) {
// Issue (I): seq_cst writes are mo-ordered after the previous seq_cst write.
1361 mo_graph->addEdge(last_seq_cst, curr);
1366 /* Iterate over all threads */
1367 for (i = 0; i < thrd_lists->size(); i++) {
1368 /* Iterate over actions in thread, starting from most recent */
1369 action_list_t *list = &(*thrd_lists)[i];
1370 action_list_t::reverse_iterator rit;
1371 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1372 ModelAction *act = *rit;
1375 * 1) If RMW and it actually read from something, then we
1376 * already have all relevant edges, so just skip to next
1379 * 2) If RMW and it didn't read from anything, we should add
1380 * whatever edge we can get to speed up convergence.
1382 * 3) If normal write, we need to look at earlier actions, so
1383 * continue processing list.
1385 if (curr->is_rmw()) {
1386 if (curr->get_reads_from()!=NULL)
1395 * Include at most one act per-thread that "happens
1398 if (act->happens_before(curr)) {
1400 * Note: if act is RMW, just add edge:
1402 * The following edge should be handled elsewhere:
1403 * readfrom(act) --mo--> act
1405 if (act->is_write())
1406 mo_graph->addEdge(act, curr);
1407 else if (act->is_read()) {
1408 //if previous read accessed a null, just keep going
1409 if (act->get_reads_from() == NULL)
1411 mo_graph->addEdge(act->get_reads_from(), curr);
1415 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1416 !act->same_thread(curr)) {
1417 /* We have an action that:
1418 (1) did not happen before us
1419 (2) is a read and we are a write
1420 (3) cannot synchronize with us
1421 (4) is in a different thread
1423 that read could potentially read from our write. Note that
1424 these checks are overly conservative at this point, we'll
1425 do more checks before actually removing the
// Issue (II): stash this (write, read) pair as a pending future
// value so the read may later observe our write.
1429 if (thin_air_constraint_may_allow(curr, act)) {
1431 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1432 struct PendingFutureValue pfv = {curr,act};
1433 futurevalues->push_back(pfv);
1443 /** Arbitrary reads from the future are not allowed. Section 29.3
1444 * part 9 places some constraints. This method checks one result of that
1445 * constraint. Others require compiler support.
 * @param writer The candidate future write.
 * @param reader The read that might take its value from 'writer'.
 * @return presumably false when the RMW chain from writer would reach
 *         reader (a thin-air cycle); the return statements are not visible
 *         in this excerpt -- confirm against the full source. */
1446 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
// Only RMW-to-RMW reads-from chains can create the cycle checked below.
1447 if (!writer->is_rmw())
1450 if (!reader->is_rmw())
// Walk the reads-from chain backwards from the writer.
1453 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1454 if (search == reader)
// Stop once the chain passes behind the reader in its own thread.
1456 if (search->get_tid() == reader->get_tid() &&
1457 search->happens_before(reader))
1465 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1466 * some constraints. This method checks the following constraint (others
1467 * require compiler support):
1469 * If X --hb-> Y --mo-> Z, then X should not read from Z.
// @param writer The candidate write (Z above).
// @param reader The read (X above).
// @return presumably false when the constraint is violated; return
//         statements are not visible in this excerpt.
1471 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1473 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1475 /* Iterate over all threads */
1476 for (i = 0; i < thrd_lists->size(); i++) {
1477 const ModelAction *write_after_read = NULL;
1479 /* Iterate over actions in thread, starting from most recent */
1480 action_list_t *list = &(*thrd_lists)[i];
1481 action_list_t::reverse_iterator rit;
// Find the earliest write (Y) in this thread that 'reader' happens-before.
1482 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1483 ModelAction *act = *rit;
1485 if (!reader->happens_before(act))
1487 else if (act->is_write())
1488 write_after_read = act;
1489 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1490 write_after_read = act->get_reads_from();
// Violation when some Y with reader --hb--> Y is mo-before the writer.
1494 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1501 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1502 * The ModelAction under consideration is expected to be taking part in
1503 * release/acquire synchronization as an object of the "reads from" relation.
1504 * Note that this can only provide release sequence support for RMW chains
1505 * which do not read from the future, as those actions cannot be traced until
1506 * their "promise" is fulfilled. Similarly, we may not even establish the
1507 * presence of a release sequence with certainty, as some modification order
1508 * constraints may be decided further in the future. Thus, this function
1509 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1510 * and a boolean representing certainty.
1512 * @param rf The action that might be part of a release sequence. Must be a
1514 * @param release_heads A pass-by-reference style return parameter. After
1515 * execution of this function, release_heads will contain the heads of all the
1516 * relevant release sequences, if any exists with certainty
1517 * @param pending A pass-by-reference style return parameter which is only used
1518 * when returning false (i.e., uncertain). Returns most information regarding
1519 * an uncertain release sequence, including any write operations that might
1520 * break the sequence.
1521 * @return true, if the ModelChecker is certain that release_heads is complete;
1524 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1525 rel_heads_list_t *release_heads,
1526 struct release_seq *pending) const
1528 /* Only check for release sequences if there are no cycles */
1529 if (mo_graph->checkForCycles())
1533 ASSERT(rf->is_write());
// Phase 1: walk backwards along the RMW reads-from chain, collecting
// every release write encountered along the way.
1535 if (rf->is_release())
1536 release_heads->push_back(rf);
1538 break; /* End of RMW chain */
1540 /** @todo Need to be smarter here... In the linux lock
1541 * example, this will run to the beginning of the program for
1543 /** @todo The way to be smarter here is to keep going until 1
1544 * thread has a release preceded by an acquire and you've seen
1547 /* acq_rel RMW is a sufficient stopping condition */
1548 if (rf->is_acquire() && rf->is_release())
1549 return true; /* complete */
1551 rf = rf->get_reads_from();
1554 /* read from future: need to settle this later */
1556 return false; /* incomplete */
1559 if (rf->is_release())
1560 return true; /* complete */
1562 /* else relaxed write; check modification order for contiguous subsequence
1563 * -> rf must be same thread as release */
1564 int tid = id_to_int(rf->get_tid());
1565 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1566 action_list_t *list = &(*thrd_lists)[tid];
1567 action_list_t::const_reverse_iterator rit;
1569 /* Find rf in the thread list */
1570 rit = std::find(list->rbegin(), list->rend(), rf);
1571 ASSERT(rit != list->rend());
1573 /* Find the last write/release */
1574 for (; rit != list->rend(); rit++)
1575 if ((*rit)->is_release())
1577 if (rit == list->rend()) {
1578 /* No write-release in this thread */
1579 return true; /* complete */
1581 ModelAction *release = *rit;
1583 ASSERT(rf->same_thread(release));
1585 pending->writes.clear();
// Phase 2: for every other thread, decide whether any of its (non-RMW)
// writes could slot into the modification order between 'release' and
// 'rf' and thereby break the candidate release sequence.
1587 bool certain = true;
1588 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1589 if (id_to_int(rf->get_tid()) == (int)i)
1591 list = &(*thrd_lists)[i];
1593 /* Can we ensure no future writes from this thread may break
1594 * the release seq? */
1595 bool future_ordered = false;
1597 ModelAction *last = get_last_action(int_to_id(i));
1598 Thread *th = get_thread(int_to_id(i));
1599 if ((last && rf->happens_before(last)) ||
1602 future_ordered = true;
1604 ASSERT(!th->is_model_thread() || future_ordered);
1606 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1607 const ModelAction *act = *rit;
1608 /* Reach synchronization -> this thread is complete */
1609 if (act->happens_before(release))
1611 if (rf->happens_before(act)) {
1612 future_ordered = true;
1616 /* Only non-RMW writes can break release sequences */
1617 if (!act->is_write() || act->is_rmw())
1620 /* Check modification order */
1621 if (mo_graph->checkReachable(rf, act)) {
1622 /* rf --mo--> act */
1623 future_ordered = true;
1626 if (mo_graph->checkReachable(act, release))
1627 /* act --mo--> release */
1629 if (mo_graph->checkReachable(release, act) &&
1630 mo_graph->checkReachable(act, rf)) {
1631 /* release --mo-> act --mo--> rf */
1632 return true; /* complete */
1634 /* act may break release sequence */
1635 pending->writes.push_back(act);
1638 if (!future_ordered)
1639 certain = false; /* This thread is uncertain */
// Certain outcome: report the release head and clear the pending info;
// otherwise record what is known so the caller can re-check lazily.
1643 release_heads->push_back(release);
1644 pending->writes.clear();
1646 pending->release = release;
1653 * A public interface for getting the release sequence head(s) with which a
1654 * given ModelAction must synchronize. This function only returns a non-empty
1655 * result when it can locate a release sequence head with certainty. Otherwise,
1656 * it may mark the internal state of the ModelChecker so that it will handle
1657 * the release sequence at a later time, causing @a act to update its
1658 * synchronization at some later point in execution.
1659 * @param act The 'acquire' action that may read from a release sequence
1660 * @param release_heads A pass-by-reference return parameter. Will be filled
1661 * with the head(s) of the release sequence(s), if they exists with certainty.
1662 * @see ModelChecker::release_seq_heads
1664 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1666 const ModelAction *rf = act->get_reads_from();
// Allocate on the snapshotting heap so the pending record is rolled back
// with the rest of the model-checker state.
1667 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1668 sequence->acquire = act;
1670 if (!release_seq_heads(rf, release_heads, sequence)) {
1671 /* add act to 'lazy checking' list */
1672 pending_rel_seqs->push_back(sequence);
// Certain result: the record is not needed, free it immediately.
1674 snapshot_free(sequence);
1679 * Attempt to resolve all stashed operations that might synchronize with a
1680 * release sequence for a given location. This implements the "lazy" portion of
1681 * determining whether or not a release sequence was contiguous, since not all
1682 * modification order information is present at the time an action occurs.
1684 * @param location The location/object that should be checked for release
1685 * sequence resolutions. A NULL value means to check all locations.
1686 * @param work_queue The work queue to which to add work items as they are
1688 * @return True if any updates occurred (new synchronization, new mo_graph
1691 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1693 bool updated = false;
1694 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1695 while (it != pending_rel_seqs->end()) {
1696 struct release_seq *pending = *it;
1697 ModelAction *act = pending->acquire;
1699 /* Only resolve sequences on the given location, if provided */
1700 if (location && act->get_location() != location) {
1705 const ModelAction *rf = act->get_reads_from();
1706 rel_heads_list_t release_heads;
1708 complete = release_seq_heads(rf, &release_heads, pending);
// Synchronize the acquire with every head found; a failed
// synchronize_with indicates inconsistent synchronization.
1709 for (unsigned int i = 0; i < release_heads.size(); i++) {
1710 if (!act->has_synchronized_with(release_heads[i])) {
1711 if (act->synchronize_with(release_heads[i]))
1714 set_bad_synchronization();
1719 /* Re-check all pending release sequences */
1720 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1721 /* Re-check act for mo_graph edges */
1722 work_queue->push_back(MOEdgeWorkEntry(act));
1724 /* propagate synchronization to later actions */
1725 action_list_t::reverse_iterator rit = action_trace->rbegin();
1726 for (; (*rit) != act; rit++) {
1727 ModelAction *propagate = *rit;
1728 if (act->happens_before(propagate)) {
1729 propagate->synchronize_with(act);
1730 /* Re-check 'propagate' for mo_graph edges */
1731 work_queue->push_back(MOEdgeWorkEntry(propagate));
// Completed sequences are removed from the lazy-check list and freed.
1736 it = pending_rel_seqs->erase(it);
1737 snapshot_free(pending);
1743 // If we resolved promises or data races, see if we have realized a data race.
1744 if (checkDataRaces()) {
1752 * Performs various bookkeeping operations for the current ModelAction. For
1753 * instance, adds action to the per-object, per-thread action vector and to the
1754 * action trace list of all thread actions.
1756 * @param act is the ModelAction to add.
1758 void ModelChecker::add_action_to_lists(ModelAction *act)
1760 int tid = id_to_int(act->get_tid());
1761 action_trace->push_back(act);
1763 get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
1765 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1766 if (tid >= (int)vec->size())
1767 vec->resize(priv->next_thread_id);
1768 (*vec)[tid].push_back(act);
1770 if ((int)thrd_last_action->size() <= tid)
1771 thrd_last_action->resize(get_num_threads());
1772 (*thrd_last_action)[tid] = act;
// A wait action carries its mutex location in its value field, so it is
// additionally indexed under the mutex's location.
1774 if (act->is_wait()) {
1775 void *mutex_loc=(void *) act->get_value();
1776 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1778 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1779 if (tid >= (int)vec->size())
1780 vec->resize(priv->next_thread_id);
1781 (*vec)[tid].push_back(act);
1783 if ((int)thrd_last_action->size() <= tid)
1784 thrd_last_action->resize(get_num_threads());
1785 (*thrd_last_action)[tid] = act;
1790 * @brief Get the last action performed by a particular Thread
1791 * @param tid The thread ID of the Thread in question
1792 * @return The last action in the thread
// NOTE(review): the fall-through return (presumably NULL when the thread
// has no recorded action) is not visible in this excerpt.
1794 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1796 int threadid = id_to_int(tid);
1797 if (threadid < (int)thrd_last_action->size())
1798 return (*thrd_last_action)[id_to_int(tid)];
1804 * Gets the last memory_order_seq_cst write (in the total global sequence)
1805 * performed on a particular object (i.e., memory location), not including the
1807 * @param curr The current ModelAction; also denotes the object location to
1809 * @return The last seq_cst write
1811 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1813 void *location = curr->get_location();
1814 action_list_t *list = get_safe_ptr_action(obj_map, location);
1815 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
// Scan backwards so the first match is the most recent qualifying write;
// curr itself is explicitly excluded.
1816 action_list_t::reverse_iterator rit;
1817 for (rit = list->rbegin(); rit != list->rend(); rit++)
1818 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1824 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1825 * location). This function identifies the mutex according to the current
1826 * action, which is presumed to perform on the same mutex.
1827 * @param curr The current ModelAction; also denotes the object location to
1829 * @return The last unlock operation
1831 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1833 void *location = curr->get_location();
1834 action_list_t *list = get_safe_ptr_action(obj_map, location);
1835 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Reverse scan: the first unlock (or wait, which releases the mutex)
// found is the most recent one.
1836 action_list_t::reverse_iterator rit;
1837 for (rit = list->rbegin(); rit != list->rend(); rit++)
1838 if ((*rit)->is_unlock() || (*rit)->is_wait())
// Returns the action ordering context for thread 'tid': its last action,
// or (per the visible fallback) the thread-creation action when the
// thread has not yet performed any action of its own.
1843 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1845 ModelAction *parent = get_last_action(tid);
1847 parent = get_thread(tid)->get_creation();
1852 * Returns the clock vector for a given thread.
1853 * @param tid The thread whose clock vector we want
1854 * @return Desired clock vector
1856 ClockVector * ModelChecker::get_cv(thread_id_t tid)
// Delegates to the parent action (last action or creation) for its clock.
1858 return get_parent_action(tid)->get_cv();
1862 * Resolve a set of Promises with a current write. The set is provided in the
1863 * Node corresponding to @a write.
1864 * @param write The ModelAction that is fulfilling Promises
1865 * @return True if promises were resolved; false otherwise
1867 bool ModelChecker::resolve_promises(ModelAction *write)
1869 bool resolved = false;
1870 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
// 'i' indexes the Node's promise set; 'promise_index' only advances when
// a promise is NOT resolved (resolved ones are erased in place).
1872 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1873 Promise *promise = (*promises)[promise_index];
1874 if (write->get_node()->get_promise(i)) {
1875 ModelAction *read = promise->get_action();
1876 if (read->is_rmw()) {
1877 mo_graph->addRMWEdge(write, read);
1879 read->read_from(write);
1880 //First fix up the modification order for actions that happened
1882 r_modification_order(read, write);
1883 //Next fix up the modification order for actions that happened
1885 post_r_modification_order(read, write);
1886 //Make sure the promise's value matches the write's value
1887 ASSERT(promise->get_value() == write->get_value());
1890 promises->erase(promises->begin() + promise_index);
1891 threads_to_check.push_back(read->get_tid());
1898 //Check whether reading these writes has made threads unable to
1901 for(unsigned int i=0;i<threads_to_check.size();i++)
1902 mo_check_promises(threads_to_check[i], write);
1908 * Compute the set of promises that could potentially be satisfied by this
1909 * action. Note that the set computation actually appears in the Node, not in
1911 * @param curr The ModelAction that may satisfy promises
1913 void ModelChecker::compute_promises(ModelAction *curr)
1915 for (unsigned int i = 0; i < promises->size(); i++) {
1916 Promise *promise = (*promises)[i];
1917 const ModelAction *act = promise->get_action();
// A promise is satisfiable by curr only when the promised read does not
// happen-before curr, cannot synchronize with it, is in another thread,
// and matches both the location and the promised value.
1918 if (!act->happens_before(curr) &&
1920 !act->could_synchronize_with(curr) &&
1921 !act->same_thread(curr) &&
1922 act->get_location() == curr->get_location() &&
1923 promise->get_value() == curr->get_value()) {
1924 curr->get_node()->set_promise(i, act->is_rmw());
1929 /** Checks promises in response to change in ClockVector Threads.
 * @param tid Thread whose clock vector was merged.
 * @param old_cv The thread's clock vector before the merge (may be NULL).
 * @param merge_cv The clock vector after the merge. */
1930 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1932 for (unsigned int i = 0; i < promises->size(); i++) {
1933 Promise *promise = (*promises)[i];
1934 const ModelAction *act = promise->get_action();
// Only react to promises that newly became synchronized by this merge.
1935 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1936 merge_cv->synchronized_since(act)) {
1937 if (promise->increment_threads(tid)) {
1938 //Promise has failed
1939 failed_promise = true;
// Re-evaluates every outstanding promise after a thread is disabled;
// a promise that can no longer be fulfilled marks the execution infeasible.
1946 void ModelChecker::check_promises_thread_disabled() {
1947 for (unsigned int i = 0; i < promises->size(); i++) {
1948 Promise *promise = (*promises)[i];
1949 if (promise->check_promise()) {
1950 failed_promise = true;
1956 /** Checks promises in response to addition to modification order for threads.
1958 * pthread is the thread that performed the read that created the promise
1960 * pread is the read that created the promise
1962 * pwrite is either the first write to same location as pread by
1963 * pthread that is sequenced after pread or the value read by the
1964 * first read to the same location as pread by pthread that is
1965 * sequenced after pread..
1967 * 1. If tid=pthread, then we check what other threads are reachable
1968 * through the modification order starting with pwrite. Those threads cannot
1969 * perform a write that will resolve the promise due to modification
1970 * order constraints.
1972 * 2. If the tid is not pthread, we check whether pwrite can reach the
1973 * action write through the modification order. If so, that thread
1974 * cannot perform a future write that will resolve the promise due to
1975 * modification order constraints.
1977 * @param tid The thread that either read from the model action
1978 * write, or actually did the model action write.
1980 * @param write The ModelAction representing the relevant write.
1983 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1984 void * location = write->get_location();
1985 for (unsigned int i = 0; i < promises->size(); i++) {
1986 Promise *promise = (*promises)[i];
1987 const ModelAction *act = promise->get_action();
1989 //Is this promise on the same location?
1990 if ( act->get_location() != location )
1993 //same thread as the promise
1994 if ( act->get_tid()==tid ) {
1996 //do we have a pwrite for the promise, if not, set it
1997 if (promise->get_write() == NULL ) {
1998 promise->set_write(write);
1999 //The pwrite cannot happen before the promise
2000 if (write->happens_before(act) && (write != act)) {
2001 failed_promise = true;
2005 if (mo_graph->checkPromise(write, promise)) {
2006 failed_promise = true;
2011 //Don't do any lookups twice for the same thread
2012 if (promise->has_sync_thread(tid))
// Case 2: pwrite --mo--> write means this thread can no longer
// produce a write that fulfills the promise.
2015 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2016 if (promise->increment_threads(tid)) {
2017 failed_promise = true;
2025 * Compute the set of writes that may break the current pending release
2026 * sequence. This information is extracted from previous release sequence
2029 * @param curr The current ModelAction. Must be a release sequence fixup
2032 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2034 if (pending_rel_seqs->empty())
// Only the most recent pending sequence is considered here.
2037 struct release_seq *pending = pending_rel_seqs->back();
2038 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2039 const ModelAction *write = pending->writes[i];
2040 curr->get_node()->add_relseq_break(write);
2043 /* NULL means don't break the sequence; just synchronize */
2044 curr->get_node()->add_relseq_break(NULL);
2048 * Build up an initial set of all past writes that this 'read' action may read
2049 * from. This set is determined by the clock vector's "happens before"
2051 * @param curr is the current ModelAction that we are exploring; it must be a
2054 void ModelChecker::build_reads_from_past(ModelAction *curr)
2056 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2058 ASSERT(curr->is_read());
2060 ModelAction *last_seq_cst = NULL;
2062 /* Track whether this object has been initialized */
2063 bool initialized = false;
2065 if (curr->is_seqcst()) {
2066 last_seq_cst = get_last_seq_cst(curr);
2067 /* We have to at least see the last sequentially consistent write,
2068 so we are initialized. */
2069 if (last_seq_cst != NULL)
2073 /* Iterate over all threads */
2074 for (i = 0; i < thrd_lists->size(); i++) {
2075 /* Iterate over actions in thread, starting from most recent */
2076 action_list_t *list = &(*thrd_lists)[i];
2077 action_list_t::reverse_iterator rit;
2078 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2079 ModelAction *act = *rit;
2081 /* Only consider 'write' actions */
2082 if (!act->is_write() || act == curr)
2085 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2086 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
// Sleeping threads are further restricted in what they may read
// (see sleep_can_read_from), unless the read is seq_cst.
2087 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2088 DEBUG("Adding action to may_read_from:\n");
2089 if (DBG_ENABLED()) {
2093 curr->get_node()->add_read_from(act);
2097 /* Include at most one act per-thread that "happens before" curr */
2098 if (act->happens_before(curr)) {
2106 /** @todo Need a more informative way of reporting errors. */
2107 printf("ERROR: may read from uninitialized atomic\n");
2111 if (DBG_ENABLED() || !initialized) {
2112 printf("Reached read action:\n");
2114 printf("Printing may_read_from\n");
2115 curr->get_node()->print_may_read_from();
2116 printf("End printing may_read_from\n");
// Determines whether a read performed while its thread is on the sleep set
// may take its value from 'write'. Visible logic: a release write made while
// this thread was sleeping qualifies; otherwise, for RMWs, the reads-from
// chain is walked backwards. Some branches (returns) are not visible in
// this excerpt -- confirm exact semantics against the full source.
2120 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2122 Node *prevnode=write->get_node()->get_parent();
2124 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2125 if (write->is_release()&&thread_sleep)
2127 if (!write->is_rmw()) {
2130 if (write->get_reads_from()==NULL)
2132 write=write->get_reads_from();
// Prints an action list bracketed by separator lines, accumulating and
// printing a simple rolling hash of the actions (useful for quickly
// comparing traces across runs).
2136 static void print_list(action_list_t *list)
2138 action_list_t::iterator it;
2140 printf("---------------------------------------------------------------------\n");
2142 unsigned int hash=0;
2144 for (it = list->begin(); it != list->end(); it++) {
2146 hash=hash^(hash<<3)^((*it)->hash());
2148 printf("HASH %u\n", hash);
2149 printf("---------------------------------------------------------------------\n");
2152 #if SUPPORT_MOD_ORDER_DUMP
// Writes the modification-order graph plus reads-from (red) and
// sequenced-before (blue) edges to "<filename>.dot" in Graphviz format.
// NOTE(review): 'buffer' is declared on a line not visible in this excerpt;
// sprintf into it assumes the name fits -- consider snprintf in the full source.
2153 void ModelChecker::dumpGraph(char *filename) {
2155 sprintf(buffer, "%s.dot",filename);
2156 FILE *file=fopen(buffer, "w");
2157 fprintf(file, "digraph %s {\n",filename);
2158 mo_graph->dumpNodes(file);
// Tracks the previous action per thread to emit sequenced-before edges.
2159 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2161 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2162 ModelAction *action=*it;
2163 if (action->is_read()) {
2164 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2165 if (action->get_reads_from()!=NULL)
2166 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2168 if (thread_array[action->get_tid()] != NULL) {
2169 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2172 thread_array[action->get_tid()]=action;
2174 fprintf(file,"}\n");
2175 model_free(thread_array);
// Prints end-of-run statistics (execution counts, node count), optionally
// dumps the mo-graph when SUPPORT_MOD_ORDER_DUMP is enabled, flags an
// infeasible final trace, and prints the full action trace.
2180 void ModelChecker::print_summary()
2183 printf("Number of executions: %d\n", num_executions);
2184 printf("Number of feasible executions: %d\n", num_feasible_executions);
2185 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2187 #if SUPPORT_MOD_ORDER_DUMP
2189 char buffername[100];
2190 sprintf(buffername, "exec%04u", num_executions);
2191 mo_graph->dumpGraphToFile(buffername);
2192 sprintf(buffername, "graph%04u", num_executions);
2193 dumpGraph(buffername);
2196 if (!isfinalfeasible())
2197 printf("INFEASIBLE EXECUTION!\n");
2198 print_list(action_trace);
2203 * Add a Thread to the system for the first time. Should only be called once
2205 * @param t The Thread to add
2207 void ModelChecker::add_thread(Thread *t)
2209 thread_map->put(id_to_int(t->get_id()), t);
2210 scheduler->add_thread(t);
2214 * Removes a thread from the scheduler.
2215 * @param the thread to remove.
2217 void ModelChecker::remove_thread(Thread *t)
2219 scheduler->remove_thread(t);
2223 * @brief Get a Thread reference by its ID
2224 * @param tid The Thread's ID
2225 * @return A Thread reference
2227 Thread * ModelChecker::get_thread(thread_id_t tid) const
2229 return thread_map->get(id_to_int(tid));
2233 * @brief Get a reference to the Thread in which a ModelAction was executed
2234 * @param act The ModelAction
2235 * @return A Thread reference
2237 Thread * ModelChecker::get_thread(ModelAction *act) const
2239 return get_thread(act->get_tid());
2243 * @brief Check if a Thread is currently enabled
2244 * @param t The Thread to check
2245 * @return True if the Thread is currently enabled
2247 bool ModelChecker::is_enabled(Thread *t) const
2249 return scheduler->is_enabled(t);
2253 * @brief Check if a Thread is currently enabled
2254 * @param tid The ID of the Thread to check
2255 * @return True if the Thread is currently enabled
2257 bool ModelChecker::is_enabled(thread_id_t tid) const
2259 return scheduler->is_enabled(tid);
2263 * Switch from a user-context to the "master thread" context (a.k.a. system
2264 * context). This switch is made with the intention of exploring a particular
2265 * model-checking action (described by a ModelAction object). Must be called
2266 * from a user-thread context.
2268 * @param act The current action that will be explored. May be NULL only if
2269 * trace is exiting via an assertion (see ModelChecker::set_assert and
2270 * ModelChecker::has_asserted).
2271 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2273 int ModelChecker::switch_to_master(ModelAction *act)
2276 Thread *old = thread_current();
2277 set_current_action(act);
2278 old->set_state(THREAD_READY);
2279 return Thread::swap(old, &system_context);
/**
 * Takes the next step in the execution, if possible.
 * @return Returns true (success) if a step was taken and false otherwise.
 *
 * NOTE(review): this chunk appears truncated — several guard branches (e.g.
 * the infeasibility and bound early-returns) and closing braces are not
 * visible here; confirm against the full file before modifying.
 */
bool ModelChecker::take_step() {
	/* curr: the thread that issued the pending action, or NULL if none */
	Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
	if (curr->get_state() == THREAD_READY) {
		ASSERT(priv->current_action);
		/* Consume the pending action; check_current_action() may suggest
		 * which thread to schedule next (may be NULL) */
		priv->nextThread = check_current_action(priv->current_action);
		priv->current_action = NULL;
	/* A blocked or finished thread must leave the scheduler */
	if (curr->is_blocked() || curr->is_complete())
		scheduler->remove_thread(curr);
	Thread *next = scheduler->next_thread(priv->nextThread);

	/* Infeasible -> don't take any more steps */

	/* Stop exploring once the execution exceeds the configured step bound
	 * (bound == 0 means unbounded) */
	if (params.bound != 0) {
		if (priv->used_sequence_numbers > params.bound) {

	DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
			next ? id_to_int(next->get_id()) : -1);

	/*
	 * Launch end-of-execution release sequence fixups only when there are:
	 *
	 * (1) no more user threads to run (or when execution replay chooses
	 *     the 'model_thread')
	 * (2) pending release sequences
	 * (3) pending assertions (i.e., data races)
	 * (4) no pending promises
	 *
	 * NOTE(review): the condition below tests !unrealizedraces.empty(),
	 * which does not obviously match item (4) — verify intent.
	 */
	if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
			isfinalfeasible() && !unrealizedraces.empty()) {
		/* NOTE(review): "seuqences" is a typo in this warning string */
		printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
				pending_rel_seqs->size());
		/* Synthesize a model-thread action to resolve the release sequences */
		ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
				std::memory_order_seq_cst, NULL, VALUE_NONE,
		set_current_action(fixup);

	/* next == NULL -> don't take any more steps */

	next->set_state(THREAD_RUNNING);

	if (next->get_pending() != NULL) {
		/* restart a pending action */
		set_current_action(next->get_pending());
		next->set_pending(NULL);
		next->set_state(THREAD_READY);

	/* Return false only if swap fails with an error */
	return (Thread::swap(&system_context, next) == 0);
2357 /** Runs the current execution until threre are no more steps to take. */
2358 void ModelChecker::finish_execution() {
2361 while (take_step());