9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
24 struct model_snapshot_members {
// Action handed over from the user context; consumed/cleared by
// execute_sleep_set() and set via set_current_action().
25 ModelAction *current_action;
// Monotonic thread-id counter (get_next_id()); also serves as the thread
// count returned by get_num_threads().
26 unsigned int next_thread_id;
// Highest sequence number issued so far (see get_next_seq_num()).
27 modelclock_t used_sequence_numbers;
// Latest cached backtracking point; fetched-and-cleared by
// get_next_backtrack().
29 ModelAction *next_backtrack;
32 /** @brief Constructor */
// Builds all per-execution bookkeeping structures. Note several containers
// use SnapshotAlloc, so their contents are rolled back together with the
// user program's snapshotted state between executions.
33 ModelChecker::ModelChecker(struct model_params params) :
34 /* Initialize default scheduler */
36 scheduler(new Scheduler()),
38 num_feasible_executions(0),
40 earliest_diverge(NULL),
41 action_trace(new action_list_t()),
42 thread_map(new HashTable<int, Thread *, int>()),
43 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
44 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
45 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
46 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
47 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51 node_stack(new NodeStack()),
52 mo_graph(new CycleGraph()),
53 failed_promise(false),
54 too_many_reads(false),
56 bad_synchronization(false)
58 /* Allocate this "size" on the snapshotting heap */
// calloc zero-initializes priv, so counters/pointers start at 0/NULL.
59 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
60 /* First thread created will have id INITIAL_THREAD_ID */
61 priv->next_thread_id = INITIAL_THREAD_ID;
63 /* Initialize a model-checker thread, for special ModelActions */
// The model thread consumes INITIAL_THREAD_ID, so the first user thread
// gets the next id.
64 model_thread = new Thread(get_next_id());
65 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
68 /** @brief Destructor */
// Frees per-execution structures: every Thread registered in thread_map,
// every outstanding Promise, and the owning containers themselves.
69 ModelChecker::~ModelChecker()
// Thread ids are dense ints [0, num_threads), so indexing by i is valid.
71 for (unsigned int i = 0; i < get_num_threads(); i++)
72 delete thread_map->get(i);
77 delete lock_waiters_map;
78 delete condvar_waiters_map;
81 for (unsigned int i = 0; i < promises->size(); i++)
82 delete (*promises)[i];
85 delete pending_rel_seqs;
87 delete thrd_last_action;
// Look up the action list for a location, lazily creating an empty list on
// first use (presumably inserted into the table before return — the
// null-check/put lines are not visible here; confirm).
93 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
94 action_list_t * tmp=hash->get(ptr);
96 tmp=new action_list_t();
// Per-thread variant of get_safe_ptr_action(): returns the vector of
// per-thread action lists for a location, creating it lazily on first use.
102 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
103 std::vector<action_list_t> * tmp=hash->get(ptr);
105 tmp=new std::vector<action_list_t>();
112 * Restores user program to initial state and resets all model-checker data
115 void ModelChecker::reset_to_initial_state()
117 DEBUG("+++ Resetting to initial state +++\n");
118 node_stack->reset_execution();
// Clear per-execution infeasibility flags; snapshot restore below rewinds
// everything allocated with SnapshotAlloc (including priv).
119 failed_promise = false;
120 too_many_reads = false;
121 bad_synchronization = false;
123 snapshotObject->backTrackBeforeStep(0);
126 /** @return a thread ID for a new Thread */
127 thread_id_t ModelChecker::get_next_id()
// Post-increment: ids are handed out densely starting at INITIAL_THREAD_ID.
129 return priv->next_thread_id++;
132 /** @return the number of user threads created during this execution */
// Equals the next unassigned id, which counts every thread created so far
// (including the model thread — note the comment says "user threads").
133 unsigned int ModelChecker::get_num_threads() const
135 return priv->next_thread_id;
138 /** @return The currently executing Thread. */
// Thin delegation to the scheduler, which owns the notion of "current".
139 Thread * ModelChecker::get_current_thread()
141 return scheduler->get_current_thread();
144 /** @return a sequence number for a new ModelAction */
// Pre-increment: sequence numbers start at 1; 0 is never issued.
145 modelclock_t ModelChecker::get_next_seq_num()
147 return ++priv->used_sequence_numbers;
/** @return the Node at the head of the NodeStack (the current exploration
 *  point in the execution tree) */
150 Node * ModelChecker::get_curr_node() {
151 return node_stack->get_head();
155 * @brief Choose the next thread to execute.
157 * This function chooses the next thread that should execute. It can force the
158 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
159 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
160 * The model-checker may have no preference regarding the next thread (i.e.,
161 * when exploring a new execution ordering), in which case this will return
163 * @param curr The current ModelAction. This action might guide the choice of
165 * @return The next thread to run. If the model-checker has no preference, NULL.
167 Thread * ModelChecker::get_next_thread(ModelAction *curr)
172 /* Do not split atomic actions. */
// RMW read half must be immediately followed by its write half in the
// same thread (condition line elided — presumably curr->is_rmwr()).
174 return thread_current();
175 /* The THREAD_CREATE action points to the created Thread */
176 else if (curr->get_type() == THREAD_CREATE)
177 return (Thread *)curr->get_location();
180 /* Have we completed exploring the preselected path? */
184 /* Else, we are trying to replay an execution */
185 ModelAction *next = node_stack->get_next()->get_action();
187 if (next == diverge) {
// Track the earliest divergence since the last feasible execution, for
// the diagnostic printed by next_execution().
188 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
189 earliest_diverge=diverge;
191 Node *nextnode = next->get_node();
192 Node *prevnode = nextnode->get_parent();
193 scheduler->update_sleep_set(prevnode);
195 /* Reached divergence point */
// Try the next unexplored alternative at this node, in fixed priority
// order: misc index, promise set, read-from choice, future value,
// release-sequence break. Each branch replays the same thread and pops
// the stack so exploration re-descends from here.
196 if (nextnode->increment_misc()) {
197 /* The next node will try to satisfy a different misc_index values. */
198 tid = next->get_tid();
199 node_stack->pop_restofstack(2);
200 } else if (nextnode->increment_promise()) {
201 /* The next node will try to satisfy a different set of promises. */
202 tid = next->get_tid();
203 node_stack->pop_restofstack(2);
204 } else if (nextnode->increment_read_from()) {
205 /* The next node will read from a different value. */
206 tid = next->get_tid();
207 node_stack->pop_restofstack(2);
208 } else if (nextnode->increment_future_value()) {
209 /* The next node will try to read from a different future value. */
210 tid = next->get_tid();
211 node_stack->pop_restofstack(2);
212 } else if (nextnode->increment_relseq_break()) {
213 /* The next node will try to resolve a release sequence differently */
214 tid = next->get_tid();
215 node_stack->pop_restofstack(2);
// No alternatives left at this node: fall back to the parent's
// backtracking set and run a different thread instead.
217 /* Make a different thread execute for next step */
218 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
219 tid = prevnode->get_next_backtrack();
220 /* Make sure the backtracked thread isn't sleeping. */
221 node_stack->pop_restofstack(1);
222 if (diverge==earliest_diverge) {
223 earliest_diverge=prevnode->get_action();
226 /* The correct sleep set is in the parent node. */
229 DEBUG("*** Divergence point ***\n");
// Replay path (not yet at the divergence point): keep following the
// recorded schedule.
233 tid = next->get_tid();
235 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
236 ASSERT(tid != THREAD_ID_T_NONE);
237 return thread_map->get(id_to_int(tid));
241 * We need to know what the next actions of all threads in the sleep
242 * set will be. This method computes them and stores the actions at
243 * the corresponding thread object's pending action.
246 void ModelChecker::execute_sleep_set() {
247 for(unsigned int i=0;i<get_num_threads();i++) {
248 thread_id_t tid=int_to_id(i);
249 Thread *thr=get_thread(tid);
// Only sleeping threads whose next action we haven't yet computed.
250 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
251 thr->get_pending() == NULL ) {
// Temporarily run the sleeping thread one step so it publishes its
// next action into priv->current_action, then stash that action as
// the thread's pending action (flagged as a sleep-set probe).
252 thr->set_state(THREAD_RUNNING);
253 scheduler->next_thread(thr);
254 Thread::swap(&system_context, thr);
255 priv->current_action->set_sleep_flag();
256 thr->set_pending(priv->current_action);
// Discard the probe action so it isn't processed as a real step.
259 priv->current_action = NULL;
/** Remove from the sleep set any thread whose pending action could
 *  synchronize with @param curr (its sleep is no longer redundant).
 *  RMW-read halves are skipped — the full RMW is considered later. */
262 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
263 for(unsigned int i=0;i<get_num_threads();i++) {
264 thread_id_t tid=int_to_id(i);
265 Thread *thr=get_thread(tid);
266 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
// NOTE(review): pending_act is dereferenced without a NULL check —
// presumably execute_sleep_set() guarantees it is set; confirm.
267 ModelAction *pending_act=thr->get_pending();
268 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
269 //Remove this thread from sleep set
270 scheduler->remove_sleep(thr);
277 * Check if we are in a deadlock. Should only be called at the end of an
278 * execution, although it should not give false positives in the middle of an
279 * execution (there should be some ENABLED thread).
281 * @return True if program is in a deadlock; false otherwise
283 bool ModelChecker::is_deadlocked() const
285 bool blocking_threads = false;
286 for (unsigned int i = 0; i < get_num_threads(); i++) {
287 thread_id_t tid = int_to_id(i);
// (Elided lines presumably return false early if any thread is still
// enabled — confirm.)
290 Thread *t = get_thread(tid);
// A non-model thread with a pending action is blocked waiting.
291 if (!t->is_model_thread() && t->get_pending())
292 blocking_threads = true;
294 return blocking_threads;
298 * Check if this is a complete execution. That is, have all thread completed
299 * execution (rather than exiting because sleep sets have forced a redundant
302 * @return True if the execution is complete.
304 bool ModelChecker::is_complete_execution() const
// Any still-enabled thread means the execution was cut short.
306 for (unsigned int i = 0; i < get_num_threads(); i++)
307 if (is_enabled(int_to_id(i)))
313 * Queries the model-checker for more executions to explore and, if one
314 * exists, resets the model-checker state to execute a new execution.
316 * @return If there are more executions to explore, return true. Otherwise,
319 bool ModelChecker::next_execution()
// Report a deadlock at the end of an execution (condition lines elided).
326 printf("ERROR: DEADLOCK\n");
327 if (isfinalfeasible()) {
// On each feasible complete execution, report how far the earliest
// divergence was and reset the tracker for the next round.
328 printf("Earliest divergence point since last feasible execution:\n");
329 if (earliest_diverge)
330 earliest_diverge->print();
332 printf("(Not set)\n");
334 earliest_diverge = NULL;
335 num_feasible_executions++;
338 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
339 pending_rel_seqs->size());
342 if (isfinalfeasible() || DBG_ENABLED()) {
// No backtracking point left => exploration is finished.
347 if ((diverge = get_next_backtrack()) == NULL)
351 printf("Next execution will diverge at:\n");
// Rewind user program + snapshotted state, then explore the new branch.
355 reset_to_initial_state();
/** Find the most recent prior action (on the same location) that conflicts
 *  with @param act — the candidate point to backtrack to. The notion of
 *  "conflict" depends on act's type (synchronization, lock contention,
 *  failed trylocks, waits/notifies). Returns via elided lines; presumably
 *  NULL when no conflict exists — confirm. */
359 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
361 switch (act->get_type()) {
// (Case labels for the first group are elided — likely the atomic
// read/write/RMW cases.)
365 /* linear search: from most recent to oldest */
366 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
367 action_list_t::reverse_iterator rit;
368 for (rit = list->rbegin(); rit != list->rend(); rit++) {
369 ModelAction *prev = *rit;
370 if (prev->could_synchronize_with(act))
376 case ATOMIC_TRYLOCK: {
377 /* linear search: from most recent to oldest */
378 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
379 action_list_t::reverse_iterator rit;
380 for (rit = list->rbegin(); rit != list->rend(); rit++) {
381 ModelAction *prev = *rit;
382 if (act->is_conflicting_lock(prev))
387 case ATOMIC_UNLOCK: {
388 /* linear search: from most recent to oldest */
389 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
390 action_list_t::reverse_iterator rit;
391 for (rit = list->rbegin(); rit != list->rend(); rit++) {
392 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock attempt.
393 if (!act->same_thread(prev)&&prev->is_failed_trylock())
// (ATOMIC_WAIT case label elided before this search.)
399 /* linear search: from most recent to oldest */
400 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
401 action_list_t::reverse_iterator rit;
402 for (rit = list->rbegin(); rit != list->rend(); rit++) {
403 ModelAction *prev = *rit;
404 if (!act->same_thread(prev)&&prev->is_failed_trylock())
406 if (!act->same_thread(prev)&&prev->is_notify())
412 case ATOMIC_NOTIFY_ALL:
413 case ATOMIC_NOTIFY_ONE: {
414 /* linear search: from most recent to oldest */
415 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
416 action_list_t::reverse_iterator rit;
417 for (rit = list->rbegin(); rit != list->rend(); rit++) {
418 ModelAction *prev = *rit;
// A notify conflicts with another thread's wait on the same condvar.
419 if (!act->same_thread(prev)&&prev->is_wait())
430 /** This method finds backtracking points where we should try to
431 * reorder the parameter ModelAction against.
433 * @param the ModelAction to find backtracking points for.
435 void ModelChecker::set_backtracking(ModelAction *act)
437 Thread *t = get_thread(act);
438 ModelAction * prev = get_last_conflict(act);
// (Early return on prev == NULL presumably elided.)
442 Node * node = prev->get_node()->get_parent();
444 int low_tid, high_tid;
// If act's own thread was enabled at the conflict node, only that thread
// needs a backtracking mark; otherwise consider every thread (elided
// else-branch sets low_tid — presumably 0).
445 if (node->is_enabled(t)) {
446 low_tid = id_to_int(act->get_tid());
447 high_tid = low_tid+1;
450 high_tid = get_num_threads();
453 for(int i = low_tid; i < high_tid; i++) {
454 thread_id_t tid = int_to_id(i);
456 /* Make sure this thread can be enabled here. */
457 if (i >= node->get_num_threads())
460 /* Don't backtrack into a point where the thread is disabled or sleeping. */
461 if (node->enabled_status(tid)!=THREAD_ENABLED)
464 /* Check if this has been explored already */
465 if (node->has_been_explored(tid))
468 /* See if fairness allows */
// Under a fairness window, skip threads without priority whenever some
// enabled thread does have priority.
469 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
471 for(int t=0;t<node->get_num_threads();t++) {
472 thread_id_t tother=int_to_id(t);
473 if (node->is_enabled(tother) && node->has_priority(tother)) {
481 /* Cache the latest backtracking point */
482 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
483 priv->next_backtrack = prev;
485 /* If this is a new backtracking point, mark the tree */
486 if (!node->set_backtrack(tid))
488 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
489 id_to_int(prev->get_tid()),
490 id_to_int(t->get_id()));
499 * Returns last backtracking point. The model checker will explore a different
500 * path for this point in the next execution.
501 * @return The ModelAction at which the next execution should diverge.
503 ModelAction * ModelChecker::get_next_backtrack()
// Fetch-and-clear: each cached backtracking point is consumed exactly once.
505 ModelAction *next = priv->next_backtrack;
506 priv->next_backtrack = NULL;
511 * Processes a read or rmw model action.
512 * @param curr is the read model action to process.
513 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
514 * @return True if processing this read updates the mo_graph.
516 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
518 uint64_t value = VALUE_NONE;
519 bool updated = false;
// Two cases: read from an existing write (reads_from != NULL), or read
// from a "future value" backed by a new Promise.
521 const ModelAction *reads_from = curr->get_node()->get_read_from();
522 if (reads_from != NULL) {
// Stage mo_graph edges tentatively so they can be rolled back if this
// read-from choice turns out infeasible.
523 mo_graph->startChanges();
525 value = reads_from->get_value();
526 bool r_status = false;
528 if (!second_part_of_rmw) {
529 check_recency(curr, reads_from);
530 r_status = r_modification_order(curr, reads_from);
// Infeasible choice with alternatives remaining: undo staged edges and
// retry (the caller/loop structure around this is elided).
534 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
535 mo_graph->rollbackChanges();
536 too_many_reads = false;
540 curr->read_from(reads_from);
541 mo_graph->commitChanges();
542 mo_check_promises(curr->get_tid(), reads_from);
545 } else if (!second_part_of_rmw) {
546 /* Read from future value */
// The value will be supplied by a later write; record a Promise that
// some future write must resolve (see resolve_promises()).
547 value = curr->get_node()->get_future_value();
548 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
549 curr->read_from(NULL);
550 Promise *valuepromise = new Promise(curr, value, expiration);
551 promises->push_back(valuepromise);
553 get_thread(curr)->set_return_value(value);
559 * Processes a lock, trylock, or unlock model action. @param curr is
560 * the read model action to process.
562 * The try lock operation checks whether the lock is taken. If not,
563 * it falls to the normal lock operation case. If so, it returns
566 * The lock operation has already been checked that it is enabled, so
567 * it just grabs the lock and synchronizes with the previous unlock.
569 * The unlock operation has to re-enable all of the threads that are
570 * waiting on the lock.
572 * @return True if synchronization was updated; false otherwise
574 bool ModelChecker::process_mutex(ModelAction *curr) {
575 std::mutex *mutex=NULL;
576 struct std::mutex_state *state=NULL;
// For lock-family ops the mutex is the action's location; for wait the
// mutex pointer is smuggled through the action's value field.
578 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
579 mutex = (std::mutex *)curr->get_location();
580 state = mutex->get_state();
581 } else if(curr->is_wait()) {
582 mutex = (std::mutex *)curr->get_value();
583 state = mutex->get_state();
586 switch (curr->get_type()) {
587 case ATOMIC_TRYLOCK: {
588 bool success = !state->islocked;
589 curr->set_try_lock(success);
// Failed trylock returns 0 to the user thread; success returns 1 and
// falls through to the lock case below.
591 get_thread(curr)->set_return_value(0);
594 get_thread(curr)->set_return_value(1);
596 //otherwise fall into the lock case
// Detect use of the mutex before its constructor ran (clock check
// against the allocation clock).
598 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
599 printf("Lock access before initialization\n");
602 state->islocked = true;
603 ModelAction *unlock = get_last_unlock(curr);
604 //synchronize with the previous unlock statement
605 if (unlock != NULL) {
606 curr->synchronize_with(unlock);
611 case ATOMIC_UNLOCK: {
613 state->islocked = false;
614 //wake up the other threads
615 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
616 //activate all the waiting threads
617 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
618 scheduler->wake(get_thread(*rit));
// (ATOMIC_WAIT case label elided.) Wait releases the mutex and wakes
// the lock waiters, like unlock; note it keys lock_waiters_map by the
// mutex pointer stored in the action's value.
625 state->islocked = false;
626 //wake up the other threads
627 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
628 //activate all the waiting threads
629 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
630 scheduler->wake(get_thread(*rit));
633 //check whether we should go to sleep or not...simulate spurious failures
// misc == 0 selects the "actually wait" branch (enqueue on the condvar
// and sleep); other misc values model a spurious wakeup (no sleep).
634 if (curr->get_node()->get_misc()==0) {
635 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
637 scheduler->sleep(get_current_thread());
641 case ATOMIC_NOTIFY_ALL: {
642 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
643 //activate all the waiting threads
644 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
645 scheduler->wake(get_thread(*rit));
650 case ATOMIC_NOTIFY_ONE: {
// The misc index chooses *which* waiter gets woken, so every choice is
// explored across executions (see set_misc_max in initialize_curr_action).
651 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
652 int wakeupthread=curr->get_node()->get_misc();
653 action_list_t::iterator it = waiters->begin();
654 advance(it, wakeupthread);
655 scheduler->wake(get_thread(*it));
667 * Process a write ModelAction
668 * @param curr The ModelAction to process
669 * @return True if the mo_graph was updated or promises were resolved
671 bool ModelChecker::process_write(ModelAction *curr)
673 bool updated_mod_order = w_modification_order(curr);
674 bool updated_promises = resolve_promises(curr);
// Only once all promises are resolved do we re-examine deferred future
// values: with a more complete mo_graph, some stashed writes may now be
// valid future values for earlier reads (and thus new backtrack points).
676 if (promises->size() == 0) {
677 for (unsigned int i = 0; i < futurevalues->size(); i++) {
678 struct PendingFutureValue pfv = (*futurevalues)[i];
679 //Do more ambitious checks now that mo is more complete
680 if (mo_may_allow(pfv.writer, pfv.act)&&
681 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
682 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
683 priv->next_backtrack = pfv.act;
685 futurevalues->resize(0);
688 mo_graph->commitChanges();
689 mo_check_promises(curr->get_tid(), curr);
691 get_thread(curr)->set_return_value(VALUE_NONE);
692 return updated_mod_order || updated_promises;
696 * @brief Process the current action for thread-related activity
698 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
699 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
700 * synchronization, etc. This function is a no-op for non-THREAD actions
701 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
703 * @param curr The current action
704 * @return True if synchronization was updated or a thread completed
706 bool ModelChecker::process_thread_action(ModelAction *curr)
708 bool updated = false;
710 switch (curr->get_type()) {
711 case THREAD_CREATE: {
// The created Thread is carried in the action's location field.
712 Thread *th = (Thread *)curr->get_location();
713 th->set_creation(curr);
// (THREAD_JOIN case label elided.) Join synchronizes with the target
// thread's last action.
717 Thread *blocking = (Thread *)curr->get_location();
718 ModelAction *act = get_last_action(blocking->get_id());
719 curr->synchronize_with(act);
720 updated = true; /* trigger rel-seq checks */
723 case THREAD_FINISH: {
// Wake every thread blocked in a join on this thread.
724 Thread *th = get_thread(curr);
725 while (!th->wait_list_empty()) {
726 ModelAction *act = th->pop_wait_list();
727 scheduler->wake(get_thread(act));
730 updated = true; /* trigger rel-seq checks */
// (THREAD_START case presumably elided.)
734 check_promises(curr->get_tid(), NULL, curr->get_cv());
745 * @brief Process the current action for release sequence fixup activity
747 * Performs model-checker release sequence fixups for the current action,
748 * forcing a single pending release sequence to break (with a given, potential
749 * "loose" write) or to complete (i.e., synchronize). If a pending release
750 * sequence forms a complete release sequence, then we must perform the fixup
751 * synchronization, mo_graph additions, etc.
753 * @param curr The current action; must be a release sequence fixup action
754 * @param work_queue The work queue to which to add work items as they are
757 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The Node's relseq_break choice encodes which write (if any) breaks the
// sequence; NULL presumably means "force synchronization" — confirm.
759 const ModelAction *write = curr->get_node()->get_relseq_break();
760 struct release_seq *sequence = pending_rel_seqs->back();
761 pending_rel_seqs->pop_back();
763 ModelAction *acquire = sequence->acquire;
764 const ModelAction *rf = sequence->rf;
765 const ModelAction *release = sequence->release;
769 ASSERT(release->same_thread(rf));
773 * @todo Forcing a synchronization requires that we set
774 * modification order constraints. For instance, we can't allow
775 * a fixup sequence in which two separate read-acquire
776 * operations read from the same sequence, where the first one
777 * synchronizes and the other doesn't. Essentially, we can't
778 * allow any writes to insert themselves between 'release' and
782 /* Must synchronize */
783 if (!acquire->synchronize_with(release)) {
784 set_bad_synchronization();
787 /* Re-check all pending release sequences */
788 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
789 /* Re-check act for mo_graph edges */
790 work_queue->push_back(MOEdgeWorkEntry(acquire));
792 /* propagate synchronization to later actions */
// Walk the trace backwards from the end down to 'acquire', pushing the
// new synchronization into every action that happens-after it.
793 action_list_t::reverse_iterator rit = action_trace->rbegin();
794 for (; (*rit) != acquire; rit++) {
795 ModelAction *propagate = *rit;
796 if (acquire->happens_before(propagate)) {
797 propagate->synchronize_with(acquire);
798 /* Re-check 'propagate' for mo_graph edges */
799 work_queue->push_back(MOEdgeWorkEntry(propagate));
803 /* Break release sequence with new edges:
804 * release --mo--> write --mo--> rf */
// Inserting 'write' between release and rf in modification order is what
// formally breaks the release sequence.
805 mo_graph->addEdge(release, write);
806 mo_graph->addEdge(write, rf);
// NOTE(review): the two lines below sit after this function in the
// original numbering and may belong to a neighboring routine — confirm.
809 /* See if we have realized a data race */
810 if (checkDataRaces())
815 * Initialize the current action by performing one or more of the following
816 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
817 * in the NodeStack, manipulating backtracking sets, allocating and
818 * initializing clock vectors, and computing the promises to fulfill.
820 * @param curr The current action, as passed from the user context; may be
821 * freed/invalidated after the execution of this function, with a different
822 * action "returned" its place (pass-by-reference)
823 * @return True if curr is a newly-explored action; false otherwise
825 bool ModelChecker::initialize_curr_action(ModelAction **curr)
827 ModelAction *newcurr;
// RMW write-half: merge into the earlier RMWR action instead of exploring
// a fresh node.
829 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
830 newcurr = process_rmw(*curr);
833 if (newcurr->is_rmw())
834 compute_promises(newcurr);
// Normal path: assign a sequence number and step the NodeStack; during
// replay explore_action may return a previously-created ModelAction.
840 (*curr)->set_seq_number(get_next_seq_num());
842 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
844 /* First restore type and order in case of RMW operation */
845 if ((*curr)->is_rmwr())
846 newcurr->copy_typeandorder(*curr);
848 ASSERT((*curr)->get_location() == newcurr->get_location());
849 newcurr->copy_from_new(*curr);
851 /* Discard duplicate ModelAction; use action from NodeStack */
854 /* Always compute new clock vector */
855 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
858 return false; /* Action was explored previously */
862 /* Always compute new clock vector */
863 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
865 * Perform one-time actions when pushing new ModelAction onto
// First visit of this action: seed the Node's choice sets (promises for
// writes, break-writes for relseq fixups, misc bounds for wait/notify).
868 if (newcurr->is_write())
869 compute_promises(newcurr);
870 else if (newcurr->is_relseq_fixup())
871 compute_relseq_breakwrites(newcurr);
872 else if (newcurr->is_wait())
873 newcurr->get_node()->set_misc_max(2);
874 else if (newcurr->is_notify_one()) {
875 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
877 return true; /* This was a new ModelAction */
882 * @brief Check whether a model action is enabled.
884 * Checks whether a lock or join operation would be successful (i.e., is the
885 * lock already locked, or is the joined thread already complete). If not, put
886 * the action in a waiter list.
888 * @param curr is the ModelAction to check whether it is enabled.
889 * @return a bool that indicates whether the action is enabled.
891 bool ModelChecker::check_action_enabled(ModelAction *curr) {
892 if (curr->is_lock()) {
893 std::mutex * lock = (std::mutex *)curr->get_location();
894 struct std::mutex_state * state = lock->get_state();
895 if (state->islocked) {
896 //Stick the action in the appropriate waiting queue
// An ATOMIC_UNLOCK on this mutex will wake us (see process_mutex).
897 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
900 } else if (curr->get_type() == THREAD_JOIN) {
901 Thread *blocking = (Thread *)curr->get_location();
902 if (!blocking->is_complete()) {
// THREAD_FINISH in the target thread drains this wait list.
903 blocking->push_wait_list(curr);
912 * Stores the ModelAction for the current thread action. Call this
913 * immediately before switching from user- to system-context to pass
915 * @param act The ModelAction created by the user-thread action
917 void ModelChecker::set_current_action(ModelAction *act) {
918 priv->current_action = act;
922 * This is the heart of the model checker routine. It performs model-checking
923 * actions corresponding to a given "current action." Among other processes, it
924 * calculates reads-from relationships, updates synchronization clock vectors,
925 * forms a memory_order constraints graph, and handles replay/backtrack
926 * execution when running permutations of previously-observed executions.
928 * @param curr The current action to process
929 * @return The next Thread that must be executed. May be NULL if ModelChecker
930 * makes no choice (e.g., according to replay execution, combining RMW actions,
933 Thread * ModelChecker::check_current_action(ModelAction *curr)
936 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
// A blocked lock/join is deferred: stash it as pending, put the thread to
// sleep, and pick another thread — the action replays when woken.
938 if (!check_action_enabled(curr)) {
939 /* Make the execution look like we chose to run this action
940 * much later, when a lock/join can succeed */
941 get_current_thread()->set_pending(curr);
942 scheduler->sleep(get_current_thread());
943 return get_next_thread(NULL);
946 bool newly_explored = initialize_curr_action(&curr);
948 wake_up_sleeping_actions(curr);
950 /* Add the action to lists before any other model-checking tasks */
951 if (!second_part_of_rmw)
952 add_action_to_lists(curr);
954 /* Build may_read_from set for newly-created actions */
955 if (newly_explored && curr->is_read())
956 build_reads_from_past(curr);
958 /* Initialize work_queue with the "current action" work */
// Fixed-point loop: processing one item may enqueue release-sequence or
// mo-edge re-checks until no further updates occur.
959 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
960 while (!work_queue.empty()) {
961 WorkQueueEntry work = work_queue.front();
962 work_queue.pop_front();
965 case WORK_CHECK_CURR_ACTION: {
966 ModelAction *act = work.action;
967 bool update = false; /* update this location's release seq's */
968 bool update_all = false; /* update all release seq's */
970 if (process_thread_action(curr))
973 if (act->is_read() && process_read(act, second_part_of_rmw))
976 if (act->is_write() && process_write(act))
979 if (act->is_mutex_op() && process_mutex(act))
982 if (act->is_relseq_fixup())
983 process_relseq_fixup(curr, &work_queue);
// NULL location means "re-check every pending release sequence".
986 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
988 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
991 case WORK_CHECK_RELEASE_SEQ:
992 resolve_release_sequences(work.location, &work_queue);
994 case WORK_CHECK_MO_EDGES: {
995 /** @todo Complete verification of work_queue */
996 ModelAction *act = work.action;
997 bool updated = false;
999 if (act->is_read()) {
1000 const ModelAction *rf = act->get_reads_from();
1001 if (rf != NULL && r_modification_order(act, rf))
1004 if (act->is_write()) {
1005 if (w_modification_order(act))
1008 mo_graph->commitChanges();
1011 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// After the queue drains: record any new backtracking points, then pick
// the next thread to run.
1020 check_curr_backtracking(curr);
1021 set_backtracking(curr);
1022 return get_next_thread(curr);
/** If the current action's node (or its parent) still has unexplored
 *  alternatives, cache curr as the latest backtracking point. */
1025 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1026 Node *currnode = curr->get_node();
1027 Node *parnode = currnode->get_parent();
// Any non-empty choice set means this point must be revisited; only
// overwrite the cached point if curr is later in the trace.
1029 if ((!parnode->backtrack_empty() ||
1030 !currnode->misc_empty() ||
1031 !currnode->read_from_empty() ||
1032 !currnode->future_value_empty() ||
1033 !currnode->promise_empty() ||
1034 !currnode->relseq_break_empty())
1035 && (!priv->next_backtrack ||
1036 *curr > *priv->next_backtrack)) {
1037 priv->next_backtrack = curr;
/** @return true if any outstanding Promise's expiration clock has already
 *  passed the current sequence number (execution is then infeasible). */
1041 bool ModelChecker::promises_expired() const
1043 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1044 Promise *promise = (*promises)[promise_index];
1045 if (promise->get_expiration()<priv->used_sequence_numbers) {
1052 /** @return whether the current partial trace must be a prefix of a
1053 * feasible trace. */
// Stricter than isfeasible(): also requires no outstanding promises and no
// pending release sequences.
1054 bool ModelChecker::isfeasibleprefix() const
1056 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1059 /** @return whether the current partial trace is feasible. */
1060 bool ModelChecker::isfeasible() const
// Debug-only diagnostic; the actual decision is the return expression.
1062 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1063 DEBUG("Infeasible: RMW violation\n");
1065 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1068 /** @return whether the current partial trace is feasible other than
1069 * multiple RMW reading from the same store. */
1070 bool ModelChecker::isfeasibleotherthanRMW() const
// Debug block only reports *why* the trace is infeasible; the checks are
// repeated in the return expression below.
1072 if (DBG_ENABLED()) {
1073 if (mo_graph->checkForCycles())
1074 DEBUG("Infeasible: modification order cycles\n");
1076 DEBUG("Infeasible: failed promise\n");
1078 DEBUG("Infeasible: too many reads\n");
1079 if (bad_synchronization)
1080 DEBUG("Infeasible: bad synchronization ordering\n");
1081 if (promises_expired())
1082 DEBUG("Infeasible: promises expired\n");
1084 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1087 /** Returns whether the current completed trace is feasible. */
1088 bool ModelChecker::isfinalfeasible() const
// NOTE(review): "unrevolved" in the debug message below is a typo for
// "unresolved" (debug output only; behavior unaffected).
1090 if (DBG_ENABLED() && promises->size() != 0)
1091 DEBUG("Infeasible: unrevolved promises\n")
1093 return isfeasible() && promises->size() == 0;
1096 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1097 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
// Merge act (the write half) into this thread's last action (the RMWR).
1098 ModelAction *lastread = get_last_action(act->get_tid());
1099 lastread->process_rmw(act);
// A successful RMW adds the atomicity edge: its read-from write must be
// immediately mo-before the RMW itself.
1100 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1101 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1102 mo_graph->commitChanges();
1108 * Checks whether a thread has read from the same write for too many times
1109 * without seeing the effects of a later write.
1112 * 1) there must a different write that we could read from that would satisfy the modification order,
1113 * 2) we must have read from the same value in excess of maxreads times, and
1114 * 3) that other write must have been in the reads_from set for maxreads times.
1116 * If so, we decide that the execution is no longer feasible.
1118 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
// maxreads == 0 disables the whole check.
1119 if (params.maxreads != 0) {
// With at most one read-from candidate there is no "other write".
1121 if (curr->get_node()->get_read_from_size() <= 1)
1123 //Must make sure that execution is currently feasible... We could
1124 //accidentally clear by rolling back
1127 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1128 int tid = id_to_int(curr->get_tid());
1131 if ((int)thrd_lists->size() <= tid)
1133 action_list_t *list = &(*thrd_lists)[tid];
1135 action_list_t::reverse_iterator rit = list->rbegin();
1136 /* Skip past curr */
1137 for (; (*rit) != curr; rit++)
1139 /* go past curr now */
// Remember the position just past curr for the second scan below.
1142 action_list_t::reverse_iterator ritcopy = rit;
1143 //See if we have enough reads from the same value
// First scan: bail out unless the last maxreads actions of this thread
// are all reads from the same write rf (with alternatives available).
1145 for (; count < params.maxreads; rit++,count++) {
1146 if (rit==list->rend())
1148 ModelAction *act = *rit;
1149 if (!act->is_read())
1152 if (act->get_reads_from() != rf)
1154 if (act->get_node()->get_read_from_size() <= 1)
// For each alternative write curr could read instead:
1157 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1159 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1161 //Need a different write
1165 /* Test to see whether this is a feasible write to read from*/
// Tentatively add the mo edges for reading 'write' and roll back;
// only feasible alternatives count.
1166 mo_graph->startChanges();
1167 r_modification_order(curr, write);
1168 bool feasiblereadfrom = isfeasible();
1169 mo_graph->rollbackChanges();
1171 if (!feasiblereadfrom)
1175 bool feasiblewrite = true;
1176 //new we need to see if this write works for everyone
// Second scan (from ritcopy): the alternative write must have been a
// read-from candidate for every one of those maxreads reads.
1178 for (int loop = count; loop>0; loop--,rit++) {
1179 ModelAction *act=*rit;
1180 bool foundvalue = false;
1181 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1182 if (act->get_node()->get_read_from_at(j)==write) {
1188 feasiblewrite = false;
// All three conditions met: declare the execution infeasible.
1192 if (feasiblewrite) {
1193 too_many_reads = true;
1201 * Updates the mo_graph with the constraints imposed from the current
1204 * Basic idea is the following: Go through each other thread and find
1205 * the lastest action that happened before our read. Two cases:
1207 * (1) The action is a write => that write must either occur before
1208 * the write we read from or be the write we read from.
1210 * (2) The action is a read => the write that that action read from
1211 * must occur before the write we read from or be the same write.
1213 * @param curr The current action. Must be a read.
1214 * @param rf The action that curr reads from. Must be a write.
1215 * @return True if modification order edges were added; false otherwise
1217 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1219 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1222 ASSERT(curr->is_read());
1224 /* Iterate over all threads */
1225 for (i = 0; i < thrd_lists->size(); i++) {
1226 /* Iterate over actions in thread, starting from most recent */
1227 action_list_t *list = &(*thrd_lists)[i];
1228 action_list_t::reverse_iterator rit;
1229 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1230 ModelAction *act = *rit;
1233 * Include at most one act per-thread that "happens
1234 * before" curr. Don't consider reflexively.
1236 if (act->happens_before(curr) && act != curr) {
// Case (1): a prior write must be mo-ordered before the write we read.
1237 if (act->is_write()) {
1239 mo_graph->addEdge(act, rf);
// Case (2): a prior read constrains via the write *it* read from.
1243 const ModelAction *prevreadfrom = act->get_reads_from();
1244 //if the previous read is unresolved, keep going...
1245 if (prevreadfrom == NULL)
// Skip the reflexive case (prevreadfrom == rf needs no edge).
1248 if (rf != prevreadfrom) {
1249 mo_graph->addEdge(prevreadfrom, rf);
// NOTE(review): the `added` bookkeeping, break statements, closing braces
// and the return (original lines past 1249) are elided in this listing.
1261 /** This method fixes up the modification order when we resolve a
1262 * promises. The basic problem is that actions that occur after the
1263 * read curr could not property add items to the modification order
1266 * So for each thread, we find the earliest item that happens after
1267 * the read curr. This is the item we have to fix up with additional
1268 * constraints. If that action is write, we add a MO edge between
1269 * the Action rf and that action. If the action is a read, we add a
1270 * MO edge between the Action rf, and whatever the read accessed.
1272 * @param curr is the read ModelAction that we are fixing up MO edges for.
1273 * @param rf is the write ModelAction that curr reads from.
1276 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1278 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1280 ASSERT(curr->is_read());
1282 /* Iterate over all threads */
1283 for (i = 0; i < thrd_lists->size(); i++) {
1284 /* Iterate over actions in thread, starting from most recent */
1285 action_list_t *list = &(*thrd_lists)[i];
1286 action_list_t::reverse_iterator rit;
1287 ModelAction *lastact = NULL;
1289 /* Find last action that happens after curr that is either not curr or a rmw */
// Reverse iteration means `lastact` ends up as the *earliest* such action.
1290 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1291 ModelAction *act = *rit;
1292 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1298 /* Include at most one act per-thread that "happens before" curr */
1299 if (lastact != NULL) {
1300 if (lastact==curr) {
1301 //Case 1: The resolved read is a RMW, and we need to make sure
1302 //that the write portion of the RMW mod order after rf
1304 mo_graph->addEdge(rf, lastact);
1305 } else if (lastact->is_read()) {
1306 //Case 2: The resolved read is a normal read and the next
1307 //operation is a read, and we need to make sure the value read
1308 //is mod ordered after rf
1310 const ModelAction *postreadfrom = lastact->get_reads_from();
// Guard both against an unresolved read and the reflexive rf case.
1311 if (postreadfrom != NULL&&rf != postreadfrom)
1312 mo_graph->addEdge(rf, postreadfrom);
1314 //Case 3: The resolved read is a normal read and the next
1315 //operation is a write, and we need to make sure that the
1316 //write is mod ordered after rf
1318 mo_graph->addEdge(rf, lastact);
// NOTE(review): the `lastact = act;` assignment, else-branch keyword and
// closing braces are among the lines elided in this numbered listing.
1326 * Updates the mo_graph with the constraints imposed from the current write.
1328 * Basic idea is the following: Go through each other thread and find
1329 * the lastest action that happened before our write. Two cases:
1331 * (1) The action is a write => that write must occur before
1334 * (2) The action is a read => the write that that action read from
1335 * must occur before the current write.
1337 * This method also handles two other issues:
1339 * (I) Sequential Consistency: Making sure that if the current write is
1340 * seq_cst, that it occurs after the previous seq_cst write.
1342 * (II) Sending the write back to non-synchronizing reads.
1344 * @param curr The current action. Must be a write.
1345 * @return True if modification order edges were added; false otherwise
1347 bool ModelChecker::w_modification_order(ModelAction *curr)
1349 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1352 ASSERT(curr->is_write());
// Issue (I): seq_cst writes are totally ordered after the previous
// seq_cst write to the same location.
1354 if (curr->is_seqcst()) {
1355 /* We have to at least see the last sequentially consistent write,
1356 so we are initialized. */
1357 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1358 if (last_seq_cst != NULL) {
1359 mo_graph->addEdge(last_seq_cst, curr);
1364 /* Iterate over all threads */
1365 for (i = 0; i < thrd_lists->size(); i++) {
1366 /* Iterate over actions in thread, starting from most recent */
1367 action_list_t *list = &(*thrd_lists)[i];
1368 action_list_t::reverse_iterator rit;
1369 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1370 ModelAction *act = *rit;
1373 * 1) If RMW and it actually read from something, then we
1374 * already have all relevant edges, so just skip to next
1377 * 2) If RMW and it didn't read from anything, we should
1378 * whatever edge we can get to speed up convergence.
1380 * 3) If normal write, we need to look at earlier actions, so
1381 * continue processing list.
1383 if (curr->is_rmw()) {
1384 if (curr->get_reads_from()!=NULL)
1393 * Include at most one act per-thread that "happens
1396 if (act->happens_before(curr)) {
1398 * Note: if act is RMW, just add edge:
1400 * The following edge should be handled elsewhere:
1401 * readfrom(act) --mo--> act
1403 if (act->is_write())
1404 mo_graph->addEdge(act, curr);
1405 else if (act->is_read()) {
1406 //if previous read accessed a null, just keep going
1407 if (act->get_reads_from() == NULL)
1409 mo_graph->addEdge(act->get_reads_from(), curr);
// Issue (II): a later, non-synchronizing read in another thread might
// legitimately read this write "from the future" — record it as a
// pending future value for later exploration.
1413 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1414 !act->same_thread(curr)) {
1415 /* We have an action that:
1416 (1) did not happen before us
1417 (2) is a read and we are a write
1418 (3) cannot synchronize with us
1419 (4) is in a different thread
1421 that read could potentially read from our write. Note that
1422 these checks are overly conservative at this point, we'll
1423 do more checks before actually removing the
1427 if (thin_air_constraint_may_allow(curr, act)) {
// NOTE(review): the first half of this condition (original line 1428) is
// elided in this listing; only the RMW-sibling clause is visible.
1429 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1430 struct PendingFutureValue pfv = {curr,act};
1431 futurevalues->push_back(pfv);
// NOTE(review): `added` bookkeeping, break/continue statements, closing
// braces and the return are elided in this numbered listing.
1441 /** Arbitrary reads from the future are not allowed. Section 29.3
1442 * part 9 places some constraints. This method checks one result of constraint
1443 * constraint. Others require compiler support. */
1444 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
// The thin-air concern only arises for RMW-to-RMW chains; plain
// reads/writes pass the check (early returns elided at lines 1446/1449).
1445 if (!writer->is_rmw())
1448 if (!reader->is_rmw())
// Walk the RMW reads-from chain backwards from `writer`; if it reaches
// `reader` itself, or an action in reader's thread that happens-before
// reader, allowing the read would create a cyclic (out-of-thin-air) value.
1451 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1452 if (search == reader)
1454 if (search->get_tid() == reader->get_tid() &&
1455 search->happens_before(reader))
// NOTE(review): the return statements and closing braces are elided in
// this numbered listing.
1463 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1464 * some constraints. This method checks one the following constraint (others
1465 * require compiler support):
1467 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1469 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1471 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1473 /* Iterate over all threads */
1474 for (i = 0; i < thrd_lists->size(); i++) {
// For this thread, find a write "Y" that happens after `reader` ("X").
1475 const ModelAction *write_after_read = NULL;
1477 /* Iterate over actions in thread, starting from most recent */
1478 action_list_t *list = &(*thrd_lists)[i];
1479 action_list_t::reverse_iterator rit;
1480 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1481 ModelAction *act = *rit;
1483 if (!reader->happens_before(act))
1485 else if (act->is_write())
1486 write_after_read = act;
// A later read's source write serves as "Y" as well.
1487 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1488 write_after_read = act->get_reads_from();
// Violation if Y --mo--> Z (writer): the proposed rf would complete the
// forbidden X --hb-> Y --mo-> Z, X-reads-Z pattern.
1492 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
// NOTE(review): the return statements and closing braces are elided in
// this numbered listing.
1499 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1500 * The ModelAction under consideration is expected to be taking part in
1501 * release/acquire synchronization as an object of the "reads from" relation.
1502 * Note that this can only provide release sequence support for RMW chains
1503 * which do not read from the future, as those actions cannot be traced until
1504 * their "promise" is fulfilled. Similarly, we may not even establish the
1505 * presence of a release sequence with certainty, as some modification order
1506 * constraints may be decided further in the future. Thus, this function
1507 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1508 * and a boolean representing certainty.
1510 * @param rf The action that might be part of a release sequence. Must be a
1512 * @param release_heads A pass-by-reference style return parameter. After
1513 * execution of this function, release_heads will contain the heads of all the
1514 * relevant release sequences, if any exists with certainty
1515 * @param pending A pass-by-reference style return parameter which is only used
1516 * when returning false (i.e., uncertain). Returns most information regarding
1517 * an uncertain release sequence, including any write operations that might
1518 * break the sequence.
1519 * @return true, if the ModelChecker is certain that release_heads is complete;
1522 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1523 rel_heads_list_t *release_heads,
1524 struct release_seq *pending) const
1526 /* Only check for release sequences if there are no cycles */
1527 if (mo_graph->checkForCycles())
1531 ASSERT(rf->is_write());
// Walk the RMW chain backwards (the enclosing loop header is elided in
// this listing), collecting each release-write along the way.
1533 if (rf->is_release())
1534 release_heads->push_back(rf);
1536 break; /* End of RMW chain */
1538 /** @todo Need to be smarter here... In the linux lock
1539 * example, this will run to the beginning of the program for
1541 /** @todo The way to be smarter here is to keep going until 1
1542 * thread has a release preceded by an acquire and you've seen
1545 /* acq_rel RMW is a sufficient stopping condition */
1546 if (rf->is_acquire() && rf->is_release())
1547 return true; /* complete */
1549 rf = rf->get_reads_from();
1552 /* read from future: need to settle this later */
1554 return false; /* incomplete */
1557 if (rf->is_release())
1558 return true; /* complete */
1560 /* else relaxed write; check modification order for contiguous subsequence
1561 * -> rf must be same thread as release */
1562 int tid = id_to_int(rf->get_tid());
1563 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1564 action_list_t *list = &(*thrd_lists)[tid];
1565 action_list_t::const_reverse_iterator rit;
1567 /* Find rf in the thread list */
1568 rit = std::find(list->rbegin(), list->rend(), rf);
1569 ASSERT(rit != list->rend());
1571 /* Find the last write/release */
1572 for (; rit != list->rend(); rit++)
1573 if ((*rit)->is_release())
1575 if (rit == list->rend()) {
1576 /* No write-release in this thread */
1577 return true; /* complete */
1579 ModelAction *release = *rit;
1581 ASSERT(rf->same_thread(release));
1583 pending->writes.clear();
// Now verify, thread by thread, that no other-thread write can slot into
// the modification order between `release` and `rf` (which would break
// the contiguous release sequence).
1585 bool certain = true;
1586 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
// rf's own thread cannot break its own release sequence; skip it.
1587 if (id_to_int(rf->get_tid()) == (int)i)
1589 list = &(*thrd_lists)[i];
1591 /* Can we ensure no future writes from this thread may break
1592 * the release seq? */
1593 bool future_ordered = false;
1595 ModelAction *last = get_last_action(int_to_id(i));
1596 Thread *th = get_thread(int_to_id(i));
1597 if ((last && rf->happens_before(last)) ||
1600 future_ordered = true;
1602 ASSERT(!th->is_model_thread() || future_ordered);
1604 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1605 const ModelAction *act = *rit;
1606 /* Reach synchronization -> this thread is complete */
1607 if (act->happens_before(release))
1609 if (rf->happens_before(act)) {
1610 future_ordered = true;
1614 /* Only non-RMW writes can break release sequences */
1615 if (!act->is_write() || act->is_rmw())
1618 /* Check modification order */
1619 if (mo_graph->checkReachable(rf, act)) {
1620 /* rf --mo--> act */
1621 future_ordered = true;
1624 if (mo_graph->checkReachable(act, release))
1625 /* act --mo--> release */
1627 if (mo_graph->checkReachable(release, act) &&
1628 mo_graph->checkReachable(act, rf)) {
1629 /* release --mo-> act --mo--> rf */
1630 return true; /* complete */
1632 /* act may break release sequence */
1633 pending->writes.push_back(act);
1636 if (!future_ordered)
1637 certain = false; /* This thread is uncertain */
// Only a certain result publishes the head; otherwise `pending` carries
// the candidate release and potential breaking writes for lazy re-check.
1641 release_heads->push_back(release);
1642 pending->writes.clear();
1644 pending->release = release;
// NOTE(review): several conditions, `continue`/`break` statements, closing
// braces and the final return are elided in this numbered listing.
1651 * A public interface for getting the release sequence head(s) with which a
1652 * given ModelAction must synchronize. This function only returns a non-empty
1653 * result when it can locate a release sequence head with certainty. Otherwise,
1654 * it may mark the internal state of the ModelChecker so that it will handle
1655 * the release sequence at a later time, causing @a act to update its
1656 * synchronization at some later point in execution.
1657 * @param act The 'acquire' action that may read from a release sequence
1658 * @param release_heads A pass-by-reference return parameter. Will be filled
1659 * with the head(s) of the release sequence(s), if they exists with certainty.
1660 * @see ModelChecker::release_seq_heads
1662 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1664 const ModelAction *rf = act->get_reads_from();
// The release_seq record is snapshot-allocated because it must survive
// (and be rolled back with) the model checker's snapshot/restore cycle.
1665 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1666 sequence->acquire = act;
1668 if (!release_seq_heads(rf, release_heads, sequence)) {
1669 /* add act to 'lazy checking' list */
1670 pending_rel_seqs->push_back(sequence);
// Certain result: the record is not needed for lazy checking, free it.
1672 snapshot_free(sequence);
// NOTE(review): the else keyword and closing braces are elided in this
// numbered listing.
1677 * Attempt to resolve all stashed operations that might synchronize with a
1678 * release sequence for a given location. This implements the "lazy" portion of
1679 * determining whether or not a release sequence was contiguous, since not all
1680 * modification order information is present at the time an action occurs.
1682 * @param location The location/object that should be checked for release
1683 * sequence resolutions. A NULL value means to check all locations.
1684 * @param work_queue The work queue to which to add work items as they are
1686 * @return True if any updates occurred (new synchronization, new mo_graph
1689 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1691 bool updated = false;
1692 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1693 while (it != pending_rel_seqs->end()) {
1694 struct release_seq *pending = *it;
1695 ModelAction *act = pending->acquire;
1697 /* Only resolve sequences on the given location, if provided */
1698 if (location && act->get_location() != location) {
// (The ++it / continue for the skip case is elided in this listing.)
1703 const ModelAction *rf = act->get_reads_from();
1704 rel_heads_list_t release_heads;
1706 complete = release_seq_heads(rf, &release_heads, pending);
1707 for (unsigned int i = 0; i < release_heads.size(); i++) {
1708 if (!act->has_synchronized_with(release_heads[i])) {
1709 if (act->synchronize_with(release_heads[i]))
// synchronize_with returning false indicates an invalid (backwards)
// synchronization; record it so the execution is flagged infeasible.
1712 set_bad_synchronization();
1717 /* Re-check all pending release sequences */
1718 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1719 /* Re-check act for mo_graph edges */
1720 work_queue->push_back(MOEdgeWorkEntry(act));
1722 /* propagate synchronization to later actions */
1723 action_list_t::reverse_iterator rit = action_trace->rbegin();
1724 for (; (*rit) != act; rit++) {
1725 ModelAction *propagate = *rit;
1726 if (act->happens_before(propagate)) {
1727 propagate->synchronize_with(act);
1728 /* Re-check 'propagate' for mo_graph edges */
1729 work_queue->push_back(MOEdgeWorkEntry(propagate));
// A completed sequence no longer needs lazy checking: drop and free it.
1734 it = pending_rel_seqs->erase(it);
1735 snapshot_free(pending);
1741 // If we resolved promises or data races, see if we have realized a data race.
1742 if (checkDataRaces()) {
// NOTE(review): the `updated`/`complete` bookkeeping, else branches, the
// set_assert()-style race handling and the final return are elided in
// this numbered listing.
1750 * Performs various bookkeeping operations for the current ModelAction. For
1751 * instance, adds action to the per-object, per-thread action vector and to the
1752 * action trace list of all thread actions.
1754 * @param act is the ModelAction to add.
1756 void ModelChecker::add_action_to_lists(ModelAction *act)
1758 int tid = id_to_int(act->get_tid());
// Global trace of every action, in execution order.
1759 action_trace->push_back(act);
// Per-object list of actions.
1761 get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
// Per-object, per-thread lists; grow the vector to cover this tid.
1763 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1764 if (tid >= (int)vec->size())
1765 vec->resize(priv->next_thread_id);
1766 (*vec)[tid].push_back(act);
1768 if ((int)thrd_last_action->size() <= tid)
1769 thrd_last_action->resize(get_num_threads());
1770 (*thrd_last_action)[tid] = act;
// A wait action is additionally indexed under its mutex location, which
// it carries in its value field.
1772 if (act->is_wait()) {
1773 void *mutex_loc=(void *) act->get_value();
1774 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1776 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1777 if (tid >= (int)vec->size())
1778 vec->resize(priv->next_thread_id);
1779 (*vec)[tid].push_back(act);
1781 if ((int)thrd_last_action->size() <= tid)
1782 thrd_last_action->resize(get_num_threads());
1783 (*thrd_last_action)[tid] = act;
1788 * @brief Get the last action performed by a particular Thread
1789 * @param tid The thread ID of the Thread in question
1790 * @return The last action in the thread
1792 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1794 int threadid = id_to_int(tid);
// Out-of-range tid means the thread has no recorded action yet (the NULL
// return for that case is elided in this numbered listing).
1795 if (threadid < (int)thrd_last_action->size())
// Could index with `threadid` instead of re-converting tid; behavior is
// identical, so left as-is in this documentation pass.
1796 return (*thrd_last_action)[id_to_int(tid)];
1802 * Gets the last memory_order_seq_cst write (in the total global sequence)
1803 * performed on a particular object (i.e., memory location), not including the
1805 * @param curr The current ModelAction; also denotes the object location to
1807 * @return The last seq_cst write
1809 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1811 void *location = curr->get_location();
1812 action_list_t *list = get_safe_ptr_action(obj_map, location);
1813 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
// Scan backwards so the first match is the most recent qualifying write;
// `curr` itself is explicitly excluded.
1814 action_list_t::reverse_iterator rit;
1815 for (rit = list->rbegin(); rit != list->rend(); rit++)
1816 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
// NOTE(review): the `return *rit;` and fallback `return NULL;` lines are
// elided in this numbered listing.
1822 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1823 * location). This function identifies the mutex according to the current
1824 * action, which is presumed to perform on the same mutex.
1825 * @param curr The current ModelAction; also denotes the object location to
1827 * @return The last unlock operation
1829 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1831 void *location = curr->get_location();
1832 action_list_t *list = get_safe_ptr_action(obj_map, location);
1833 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1834 action_list_t::reverse_iterator rit;
// A wait releases the mutex too, so it counts as an unlock here.
1835 for (rit = list->rbegin(); rit != list->rend(); rit++)
1836 if ((*rit)->is_unlock() || (*rit)->is_wait())
// NOTE(review): the `return *rit;` and fallback `return NULL;` lines are
// elided in this numbered listing.
// Return the thread's most recent action, falling back to the THREAD_CREATE
// action that spawned it when the thread has not acted yet.
1841 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1843 ModelAction *parent = get_last_action(tid);
// (The `if (!parent)` guard around this fallback is elided in this listing.)
1845 parent = get_thread(tid)->get_creation();
1850 * Returns the clock vector for a given thread.
1851 * @param tid The thread whose clock vector we want
1852 * @return Desired clock vector
1854 ClockVector * ModelChecker::get_cv(thread_id_t tid)
// Delegates to the thread's parent action (last action or creation), which
// owns the ClockVector.
1856 return get_parent_action(tid)->get_cv();
1860 * Resolve a set of Promises with a current write. The set is provided in the
1861 * Node corresponding to @a write.
1862 * @param write The ModelAction that is fulfilling Promises
1863 * @return True if promises were resolved; false otherwise
1865 bool ModelChecker::resolve_promises(ModelAction *write)
1867 bool resolved = false;
1868 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
// Two indices: `i` tracks the node's promise bitmap, `promise_index`
// tracks the (shrinking) promises vector as entries are erased.
1870 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1871 Promise *promise = (*promises)[promise_index];
1872 if (write->get_node()->get_promise(i)) {
1873 ModelAction *read = promise->get_action();
// An RMW reader must be mo-ordered immediately after the write it
// reads from.
1874 if (read->is_rmw()) {
1875 mo_graph->addRMWEdge(write, read);
1877 read->read_from(write);
1878 //First fix up the modification order for actions that happened
1880 r_modification_order(read, write);
1881 //Next fix up the modification order for actions that happened
1883 post_r_modification_order(read, write);
1884 //Make sure the promise's value matches the write's value
1885 ASSERT(promise->get_value() == write->get_value());
1888 promises->erase(promises->begin() + promise_index);
1889 threads_to_check.push_back(read->get_tid());
1896 //Check whether reading these writes has made threads unable to
1899 for(unsigned int i=0;i<threads_to_check.size();i++)
1900 mo_check_promises(threads_to_check[i], write);
// NOTE(review): promise deletion, `resolved = true`, the else-branch
// increment of promise_index, and the return are elided in this listing.
1906 * Compute the set of promises that could potentially be satisfied by this
1907 * action. Note that the set computation actually appears in the Node, not in
1909 * @param curr The ModelAction that may satisfy promises
1911 void ModelChecker::compute_promises(ModelAction *curr)
1913 for (unsigned int i = 0; i < promises->size(); i++) {
1914 Promise *promise = (*promises)[i];
1915 const ModelAction *act = promise->get_action();
// curr can satisfy this promise only if the promised read is truly
// "in the future" relative to curr (no hb / sync / same-thread order),
// targets the same location, and expects exactly curr's value.
1916 if (!act->happens_before(curr) &&
1918 !act->could_synchronize_with(curr) &&
1919 !act->same_thread(curr) &&
1920 act->get_location() == curr->get_location() &&
1921 promise->get_value() == curr->get_value()) {
1922 curr->get_node()->set_promise(i, act->is_rmw());
1927 /** Checks promises in response to change in ClockVector Threads. */
1928 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1930 for (unsigned int i = 0; i < promises->size(); i++) {
1931 Promise *promise = (*promises)[i];
1932 const ModelAction *act = promise->get_action();
// Trigger only on the transition: tid was NOT synchronized with the
// promised read before the merge, but IS afterward — that thread can
// no longer legally fulfill the promise.
1933 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1934 merge_cv->synchronized_since(act)) {
// increment_threads returns true when every thread is ruled out.
1935 if (promise->increment_threads(tid)) {
1936 //Promise has failed
1937 failed_promise = true;
// Re-validate all outstanding promises after a thread becomes disabled; a
// promise that can no longer be fulfilled marks the execution infeasible.
1944 void ModelChecker::check_promises_thread_disabled() {
1945 for (unsigned int i = 0; i < promises->size(); i++) {
1946 Promise *promise = (*promises)[i];
1947 if (promise->check_promise()) {
1948 failed_promise = true;
1954 /** Checks promises in response to addition to modification order for threads.
1956 * pthread is the thread that performed the read that created the promise
1958 * pread is the read that created the promise
1960 * pwrite is either the first write to same location as pread by
1961 * pthread that is sequenced after pread or the value read by the
1962 * first read to the same lcoation as pread by pthread that is
1963 * sequenced after pread..
1965 * 1. If tid=pthread, then we check what other threads are reachable
1966 * through the mode order starting with pwrite. Those threads cannot
1967 * perform a write that will resolve the promise due to modification
1968 * order constraints.
1970 * 2. If the tid is not pthread, we check whether pwrite can reach the
1971 * action write through the modification order. If so, that thread
1972 * cannot perform a future write that will resolve the promise due to
1973 * modificatin order constraints.
1975 * @parem tid The thread that either read from the model action
1976 * write, or actually did the model action write.
1978 * @parem write The ModelAction representing the relevant write.
1981 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1982 void * location = write->get_location();
1983 for (unsigned int i = 0; i < promises->size(); i++) {
1984 Promise *promise = (*promises)[i];
1985 const ModelAction *act = promise->get_action();
1987 //Is this promise on the same location?
1988 if ( act->get_location() != location )
1991 //same thread as the promise
1992 if ( act->get_tid()==tid ) {
1994 //do we have a pwrite for the promise, if not, set it
1995 if (promise->get_write() == NULL ) {
1996 promise->set_write(write);
1997 //The pwrite cannot happen before the promise
1998 if (write->happens_before(act) && (write != act)) {
1999 failed_promise = true;
// checkPromise walks mo-reachability from pwrite and rules out threads.
2003 if (mo_graph->checkPromise(write, promise)) {
2004 failed_promise = true;
2009 //Don't do any lookups twice for the same thread
2010 if (promise->has_sync_thread(tid))
// Case 2: pwrite --mo--> write means thread tid can no longer provide
// a write that fulfills the promise.
2013 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2014 if (promise->increment_threads(tid)) {
2015 failed_promise = true;
// NOTE(review): `continue` statements, `return`s and closing braces are
// elided in this numbered listing.
2023 * Compute the set of writes that may break the current pending release
2024 * sequence. This information is extracted from previou release sequence
2025 * (typo in original comment: "previou" -> "previous")
2027 * @param curr The current ModelAction. Must be a release sequence fixup
2030 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
// Nothing to fix up when no release sequences are pending.
2032 if (pending_rel_seqs->empty())
// Only the most recent pending sequence is considered for this fixup.
2035 struct release_seq *pending = pending_rel_seqs->back();
2036 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2037 const ModelAction *write = pending->writes[i];
2038 curr->get_node()->add_relseq_break(write);
2041 /* NULL means don't break the sequence; just synchronize */
2042 curr->get_node()->add_relseq_break(NULL);
2046 * Build up an initial set of all past writes that this 'read' action may read
2047 * from. This set is determined by the clock vector's "happens before"
2049 * @param curr is the current ModelAction that we are exploring; it must be a
2052 void ModelChecker::build_reads_from_past(ModelAction *curr)
2054 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2056 ASSERT(curr->is_read());
2058 ModelAction *last_seq_cst = NULL;
2060 /* Track whether this object has been initialized */
2061 bool initialized = false;
2063 if (curr->is_seqcst()) {
2064 last_seq_cst = get_last_seq_cst(curr);
2065 /* We have to at least see the last sequentially consistent write,
2066 so we are initialized. */
2067 if (last_seq_cst != NULL)
2071 /* Iterate over all threads */
2072 for (i = 0; i < thrd_lists->size(); i++) {
2073 /* Iterate over actions in thread, starting from most recent */
2074 action_list_t *list = &(*thrd_lists)[i];
2075 action_list_t::reverse_iterator rit;
2076 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2077 ModelAction *act = *rit;
2079 /* Only consider 'write' actions */
2080 if (!act->is_write() || act == curr)
2083 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2084 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
// When the reading thread is on the sleep set, only writes that can
// wake it (see sleep_can_read_from) are worth exploring.
2085 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2086 DEBUG("Adding action to may_read_from:\n");
2087 if (DBG_ENABLED()) {
2091 curr->get_node()->add_read_from(act);
2095 /* Include at most one act per-thread that "happens before" curr */
2096 if (act->happens_before(curr)) {
2104 /** @todo Need a more informative way of reporting errors. */
2105 printf("ERROR: may read from uninitialized atomic\n");
2109 if (DBG_ENABLED() || !initialized) {
2110 printf("Reached read action:\n");
2112 printf("Printing may_read_from\n");
2113 curr->get_node()->print_may_read_from();
2114 printf("End printing may_read_from\n");
// NOTE(review): `initialized = true` assignments, continue/break lines,
// the `if (!initialized)` guard and closing braces are elided in this
// numbered listing.
// Determine whether a sleeping reader may read from `write`: a release write
// (or one reachable through write's RMW chain) performed while the reader's
// thread was on the sleep set can wake it. The enclosing loop header and
// return statements are elided in this numbered listing.
2118 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
// Consult the exploration node *before* write to see whether curr's
// thread was in THREAD_SLEEP_SET at that point.
2120 Node *prevnode=write->get_node()->get_parent();
2122 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2123 if (write->is_release()&&thread_sleep)
// Non-RMW write with no release: cannot wake the sleeper via this chain.
2125 if (!write->is_rmw()) {
2128 if (write->get_reads_from()==NULL)
// Follow the RMW chain backwards and retry the test on the prior write.
2130 write=write->get_reads_from();
// Debug helper: print every action in `list` plus a rolling hash of the
// trace, bracketed by separator rules. (The per-action print call between
// lines 2142 and 2144 is elided in this numbered listing.)
2134 static void print_list(action_list_t *list)
2136 action_list_t::iterator it;
2138 printf("---------------------------------------------------------------------\n");
2140 unsigned int hash=0;
2142 for (it = list->begin(); it != list->end(); it++) {
// Order-sensitive mixing: shift-xor the previous hash with each
// action's own hash so identical traces produce identical values.
2144 hash=hash^(hash<<3)^((*it)->hash());
2146 printf("HASH %u\n", hash);
2147 printf("---------------------------------------------------------------------\n");
2150 #if SUPPORT_MOD_ORDER_DUMP
// Dump the modification order plus reads-from (red) and sequenced-before
// (blue) edges as a Graphviz .dot file named "<filename>.dot".
2151 void ModelChecker::dumpGraph(char *filename) {
// NOTE(review): `buffer`'s declaration (line 2152) is elided; its size is
// not visible here, so the sprintf bound cannot be audited from this view.
2153 sprintf(buffer, "%s.dot",filename);
2154 FILE *file=fopen(buffer, "w");
2155 fprintf(file, "digraph %s {\n",filename);
2156 mo_graph->dumpNodes(file);
// Tracks the previous action per thread so sb edges chain consecutively.
2157 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2159 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2160 ModelAction *action=*it;
2161 if (action->is_read()) {
2162 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2163 if (action->get_reads_from()!=NULL)
2164 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2166 if (thread_array[action->get_tid()] != NULL) {
2167 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2170 thread_array[action->get_tid()]=action;
2172 fprintf(file,"}\n");
2173 model_free(thread_array);
// NOTE(review): the matching fclose(file) is not visible in this listing —
// confirm the file handle is closed in the elided lines.
// Print end-of-run statistics (execution counts, node count), optionally dump
// the modification-order graphs, and print the full action trace, flagging
// infeasible executions.
2178 void ModelChecker::print_summary()
2181 printf("Number of executions: %d\n", num_executions);
2182 printf("Number of feasible executions: %d\n", num_feasible_executions);
2183 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2185 #if SUPPORT_MOD_ORDER_DUMP
// One pair of dump files per execution, numbered for later correlation.
2187 char buffername[100];
2188 sprintf(buffername, "exec%04u", num_executions);
2189 mo_graph->dumpGraphToFile(buffername);
2190 sprintf(buffername, "graph%04u", num_executions);
2191 dumpGraph(buffername);
2194 if (!isfinalfeasible())
2195 printf("INFEASIBLE EXECUTION!\n");
2196 print_list(action_trace);
// NOTE(review): the #endif for the dump section and trailing printf/braces
// are elided in this numbered listing.
2201 * Add a Thread to the system for the first time. Should only be called once
2203 * @param t The Thread to add
2205 void ModelChecker::add_thread(Thread *t)
2207 thread_map->put(id_to_int(t->get_id()), t);
2208 scheduler->add_thread(t);
2212 * Removes a thread from the scheduler.
2213 * @param the thread to remove.
2215 void ModelChecker::remove_thread(Thread *t)
2217 scheduler->remove_thread(t);
2221 * @brief Get a Thread reference by its ID
2222 * @param tid The Thread's ID
2223 * @return A Thread reference
2225 Thread * ModelChecker::get_thread(thread_id_t tid) const
2227 return thread_map->get(id_to_int(tid));
2231 * @brief Get a reference to the Thread in which a ModelAction was executed
2232 * @param act The ModelAction
2233 * @return A Thread reference
2235 Thread * ModelChecker::get_thread(ModelAction *act) const
2237 return get_thread(act->get_tid());
2241 * @brief Check if a Thread is currently enabled
2242 * @param t The Thread to check
2243 * @return True if the Thread is currently enabled
2245 bool ModelChecker::is_enabled(Thread *t) const
2247 return scheduler->is_enabled(t);
2251 * @brief Check if a Thread is currently enabled
2252 * @param tid The ID of the Thread to check
2253 * @return True if the Thread is currently enabled
2255 bool ModelChecker::is_enabled(thread_id_t tid) const
2257 return scheduler->is_enabled(tid);
2261 * Switch from a user-context to the "master thread" context (a.k.a. system
2262 * context). This switch is made with the intention of exploring a particular
2263 * model-checking action (described by a ModelAction object). Must be called
2264 * from a user-thread context.
2266 * @param act The current action that will be explored. May be NULL only if
2267 * trace is exiting via an assertion (see ModelChecker::set_assert and
2268 * ModelChecker::has_asserted).
2269 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2271 int ModelChecker::switch_to_master(ModelAction *act)
2274 Thread *old = thread_current();
2275 set_current_action(act);
2276 old->set_state(THREAD_READY);
2277 return Thread::swap(old, &system_context);
2281 * Takes the next step in the execution, if possible.
2282 * @return Returns true (success) if a step was taken and false otherwise.
2284 bool ModelChecker::take_step() {
// NOTE(review): this chunk is lossy — several interior lines (closing braces,
// early returns, the feasibility/bound handling bodies, and the tail of the
// fixup ModelAction constructor call) were dropped by extraction. Comments
// below describe only the statements that are visible.
// Thread that issued the currently-pending action, if there is one.
2288 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2290 if (curr->get_state() == THREAD_READY) {
2291 ASSERT(priv->current_action);
// Explore the pending action; check_current_action() may dictate the next
// thread to run (stored in priv->nextThread), then clear the pending slot.
2293 priv->nextThread = check_current_action(priv->current_action);
2294 priv->current_action = NULL;
// A blocked or finished thread must leave the scheduler's run set.
2296 if (curr->is_blocked() || curr->is_complete())
2297 scheduler->remove_thread(curr);
// Ask the scheduler for the next thread, honoring any forced choice.
2302 Thread *next = scheduler->next_thread(priv->nextThread);
2304 /* Infeasible -> don't take any more steps */
// Enforce the user-configured step bound, when one is set (body elided).
2308 if (params.bound != 0) {
2309 if (priv->used_sequence_numbers > params.bound) {
2314 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2315 next ? id_to_int(next->get_id()) : -1);
2318 * Launch end-of-execution release sequence fixups only when there are:
2320 * (1) no more user threads to run (or when execution replay chooses
2321 * the 'model_thread')
2322 * (2) pending release sequences
2323 * (3) pending assertions (i.e., data races)
2324 * (4) no pending promises
2326 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2327 isfinalfeasible() && !unrealizedraces.empty()) {
// NOTE(review): "seuqences" typo in the printf message below — fix in a
// code change, not here (doc-only edit).
2328 printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2329 pending_rel_seqs->size());
// Synthesize a model-thread action that resolves lingering release sequences.
2330 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2331 std::memory_order_seq_cst, NULL, VALUE_NONE,
2333 set_current_action(fixup);
2337 /* next == NULL -> don't take any more steps */
2341 next->set_state(THREAD_RUNNING);
2343 if (next->get_pending() != NULL) {
2344 /* restart a pending action */
2345 set_current_action(next->get_pending());
2346 next->set_pending(NULL);
2347 next->set_state(THREAD_READY);
// Switch from the system context into the chosen user thread.
2351 /* Return false only if swap fails with an error */
2352 return (Thread::swap(&system_context, next) == 0);
2355 /** Runs the current execution until threre are no more steps to take. */
2356 void ModelChecker::finish_execution() {
2359 while (take_step());