9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
24 struct model_snapshot_members {
/* Action handed off from the user thread to the model-checker (system) context */
25 ModelAction *current_action;
/* Next thread id to hand out; also serves as the count of threads created so far
 * (see get_num_threads(), which returns this field directly) */
26 unsigned int next_thread_id;
/* Highest sequence number assigned to a ModelAction so far (see get_next_seq_num()) */
27 modelclock_t used_sequence_numbers;
/* Latest conflicting action cached as the candidate divergence point for the next execution */
29 ModelAction *next_backtrack;
32 /** @brief Constructor */
33 ModelChecker::ModelChecker(struct model_params params) :
34 /* Initialize default scheduler */
36 scheduler(new Scheduler()),
38 num_feasible_executions(0),
40 earliest_diverge(NULL),
41 action_trace(new action_list_t()),
42 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, keyed by the accessed memory address */
43 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Threads blocked waiting on a mutex / condition variable, keyed by location */
44 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
45 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-location, per-thread action lists (vector indexed by thread id) */
46 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
/* Snapshot-allocated containers: their contents roll back with the snapshot heap */
47 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51 node_stack(new NodeStack()),
52 mo_graph(new CycleGraph()),
/* Feasibility flags; cleared again in reset_to_initial_state() */
53 failed_promise(false),
54 too_many_reads(false),
56 bad_synchronization(false)
58 /* Allocate this "size" on the snapshotting heap */
59 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
60 /* First thread created will have id INITIAL_THREAD_ID */
61 priv->next_thread_id = INITIAL_THREAD_ID;
63 /* Initialize a model-checker thread, for special ModelActions */
64 model_thread = new Thread(get_next_id());
65 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
68 /** @brief Destructor */
69 ModelChecker::~ModelChecker()
/* Delete every Thread ever created (ids are dense: 0 .. num_threads-1) */
71 for (unsigned int i = 0; i < get_num_threads(); i++)
72 delete thread_map->get(i);
77 delete lock_waiters_map;
78 delete condvar_waiters_map;
/* Promises are owned by this object; free each before the container */
81 for (unsigned int i = 0; i < promises->size(); i++)
82 delete (*promises)[i];
85 delete pending_rel_seqs;
87 delete thrd_last_action;
/**
 * Look up the action list for @a ptr in @a hash, lazily allocating (and,
 * presumably, inserting) an empty list on first use — TODO confirm the
 * insertion happens on the miss path.
 */
93 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
94 action_list_t * tmp=hash->get(ptr);
96 tmp=new action_list_t();
/**
 * Same lazy-allocation pattern as get_safe_ptr_action(), but for the
 * per-thread action-list vectors stored in obj_thrd_map.
 */
102 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
103 std::vector<action_list_t> * tmp=hash->get(ptr);
105 tmp=new std::vector<action_list_t>();
112 * Restores user program to initial state and resets all model-checker data
115 void ModelChecker::reset_to_initial_state()
117 DEBUG("+++ Resetting to initial state +++\n");
118 node_stack->reset_execution();
/* Clear per-execution infeasibility flags (see isfeasibleotherthanRMW()) */
119 failed_promise = false;
120 too_many_reads = false;
121 bad_synchronization = false;
/* Roll the snapshotting heap back to the very first snapshot; this also
 * resets the snapshot-allocated members (priv, promises, etc.) */
123 snapshotObject->backTrackBeforeStep(0);
126 /** @return a thread ID for a new Thread */
127 thread_id_t ModelChecker::get_next_id()
/* Post-increment: ids are handed out densely starting at INITIAL_THREAD_ID */
129 return priv->next_thread_id++;
132 /** @return the number of user threads created during this execution */
/* Note: next_thread_id doubles as the thread count, since ids are dense */
133 unsigned int ModelChecker::get_num_threads() const
135 return priv->next_thread_id;
138 /** @return The currently executing Thread. */
/* Thin delegation to the Scheduler, which owns the notion of "current" */
139 Thread * ModelChecker::get_current_thread()
141 return scheduler->get_current_thread();
144 /** @return a sequence number for a new ModelAction */
145 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: sequence numbers start at 1; 0 is never handed out here */
147 return ++priv->used_sequence_numbers;
/** @return the head Node of the NodeStack (the current exploration point) */
150 Node * ModelChecker::get_curr_node() {
151 return node_stack->get_head();
155 * @brief Choose the next thread to execute.
157 * This function chooses the next thread that should execute. It can force the
158 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
159 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
160 * The model-checker may have no preference regarding the next thread (i.e.,
161 * when exploring a new execution ordering), in which case this will return
163 * @param curr The current ModelAction. This action might guide the choice of
165 * @return The next thread to run. If the model-checker has no preference, NULL.
167 Thread * ModelChecker::get_next_thread(ModelAction *curr)
172 /* Do not split atomic actions. */
174 return thread_current();
175 /* The THREAD_CREATE action points to the created Thread */
176 else if (curr->get_type() == THREAD_CREATE)
177 return (Thread *)curr->get_location();
180 /* Have we completed exploring the preselected path? */
184 /* Else, we are trying to replay an execution */
185 ModelAction *next = node_stack->get_next()->get_action();
187 if (next == diverge) {
/* Track the earliest divergence since the last feasible execution (for diagnostics) */
188 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
189 earliest_diverge=diverge;
191 Node *nextnode = next->get_node();
192 Node *prevnode = nextnode->get_parent();
193 scheduler->update_sleep_set(prevnode);
195 /* Reached divergence point */
/* Try each per-node alternative in priority order: misc index, promise set,
 * read-from choice, future value, release-sequence break. The first counter
 * that can still be advanced determines the new exploration; everything
 * below the divergence point is discarded from the stack. */
196 if (nextnode->increment_misc()) {
197 /* The next node will try to satisfy different misc_index values. */
198 tid = next->get_tid();
199 node_stack->pop_restofstack(2);
200 } else if (nextnode->increment_promise()) {
201 /* The next node will try to satisfy a different set of promises. */
202 tid = next->get_tid();
203 node_stack->pop_restofstack(2);
204 } else if (nextnode->increment_read_from()) {
205 /* The next node will read from a different value. */
206 tid = next->get_tid();
207 node_stack->pop_restofstack(2);
208 } else if (nextnode->increment_future_value()) {
209 /* The next node will try to read from a different future value. */
210 tid = next->get_tid();
211 node_stack->pop_restofstack(2);
212 } else if (nextnode->increment_relseq_break()) {
213 /* The next node will try to resolve a release sequence differently */
214 tid = next->get_tid();
215 node_stack->pop_restofstack(2);
217 /* Make a different thread execute for next step */
/* All alternatives at this node are exhausted: put this thread to sleep and
 * switch to the parent node's recorded backtracking thread instead */
218 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
219 tid = prevnode->get_next_backtrack();
220 /* Make sure the backtracked thread isn't sleeping. */
221 node_stack->pop_restofstack(1);
222 if (diverge==earliest_diverge) {
223 earliest_diverge=prevnode->get_action();
226 /* The correct sleep set is in the parent node. */
229 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: keep replaying the recorded schedule */
233 tid = next->get_tid();
235 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
236 ASSERT(tid != THREAD_ID_T_NONE);
237 return thread_map->get(id_to_int(tid));
241 * We need to know what the next actions of all threads in the sleep
242 * set will be. This method computes them and stores the actions at
243 * the corresponding thread object's pending action.
246 void ModelChecker::execute_sleep_set() {
247 for(unsigned int i=0;i<get_num_threads();i++) {
248 thread_id_t tid=int_to_id(i);
249 Thread *thr=get_thread(tid);
/* Only probe sleeping threads that have no pending action recorded yet */
250 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
251 thr->get_pending() == NULL ) {
/* Briefly run the thread so it publishes its next action via
 * set_current_action(); control returns here at the context switch */
252 thr->set_state(THREAD_RUNNING);
253 scheduler->next_thread(thr);
254 Thread::swap(&system_context, thr);
/* Mark the probed action so it is recognized as a sleep-set probe */
255 priv->current_action->set_sleep_flag();
256 thr->set_pending(priv->current_action);
/* Clear the handoff slot so the probe is not mistaken for a real step */
259 priv->current_action = NULL;
/**
 * Wake any sleeping thread whose pending action could synchronize with
 * @a curr (the action about to execute). RMW-read halves are skipped —
 * presumably the full RMW is handled when the write half runs; confirm.
 */
262 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
263 for(unsigned int i=0;i<get_num_threads();i++) {
264 thread_id_t tid=int_to_id(i);
265 Thread *thr=get_thread(tid);
266 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
267 ModelAction *pending_act=thr->get_pending();
268 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
269 //Remove this thread from sleep set
270 scheduler->remove_sleep(thr);
277 * Check if we are in a deadlock. Should only be called at the end of an
278 * execution, although it should not give false positives in the middle of an
279 * execution (there should be some ENABLED thread).
281 * @return True if program is in a deadlock; false otherwise
283 bool ModelChecker::is_deadlocked() const
285 bool blocking_threads = false;
286 for (unsigned int i = 0; i < get_num_threads(); i++) {
287 thread_id_t tid = int_to_id(i);
290 Thread *t = get_thread(tid);
/* A user thread with a pending action is blocked waiting to run */
291 if (!t->is_model_thread() && t->get_pending())
292 blocking_threads = true;
294 return blocking_threads;
298 * Check if this is a complete execution. That is, have all thread completed
299 * execution (rather than exiting because sleep sets have forced a redundant
302 * @return True if the execution is complete.
304 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution ended early (incomplete) */
306 for (unsigned int i = 0; i < get_num_threads(); i++)
307 if (is_enabled(int_to_id(i)))
313 * Queries the model-checker for more executions to explore and, if one
314 * exists, resets the model-checker state to execute a new execution.
316 * @return If there are more executions to explore, return true. Otherwise,
319 bool ModelChecker::next_execution()
326 printf("ERROR: DEADLOCK\n");
327 if (isfinalfeasible()) {
/* Report where this execution first diverged from the previous feasible one */
328 printf("Earliest divergence point since last feasible execution:\n");
329 if (earliest_diverge)
330 earliest_diverge->print();
332 printf("(Not set)\n");
334 earliest_diverge = NULL;
335 num_feasible_executions++;
338 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
339 pending_rel_seqs->size());
340 
342 if (isfinalfeasible() || DBG_ENABLED()) {
/* No recorded backtracking point left => state space fully explored */
347 if ((diverge = get_next_backtrack()) == NULL)
351 printf("Next execution will diverge at:\n");
/* Roll the program and checker state back for the next execution */
355 reset_to_initial_state();
/**
 * Find the most recent action at the same location that conflicts with
 * @a act, scanning that location's action list from newest to oldest.
 * What counts as a "conflict" depends on the action type (synchronization
 * candidates for reads/writes, lock conflicts for trylock, failed trylocks
 * for unlock/wait, waits for notify).
 * @param act The action to find a conflict for
 */
359 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
361 switch (act->get_type()) {
365 /* linear search: from most recent to oldest */
366 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
367 action_list_t::reverse_iterator rit;
368 for (rit = list->rbegin(); rit != list->rend(); rit++) {
369 ModelAction *prev = *rit;
370 if (prev->could_synchronize_with(act))
376 case ATOMIC_TRYLOCK: {
377 /* linear search: from most recent to oldest */
378 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
379 action_list_t::reverse_iterator rit;
380 for (rit = list->rbegin(); rit != list->rend(); rit++) {
381 ModelAction *prev = *rit;
382 if (act->is_conflicting_lock(prev))
387 case ATOMIC_UNLOCK: {
388 /* linear search: from most recent to oldest */
/* An unlock conflicts with another thread's failed trylock (that trylock
 * could have succeeded if reordered after this unlock) */
389 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
390 action_list_t::reverse_iterator rit;
391 for (rit = list->rbegin(); rit != list->rend(); rit++) {
392 ModelAction *prev = *rit;
393 if (!act->same_thread(prev)&&prev->is_failed_trylock())
399 /* linear search: from most recent to oldest */
400 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
401 action_list_t::reverse_iterator rit;
402 for (rit = list->rbegin(); rit != list->rend(); rit++) {
403 ModelAction *prev = *rit;
404 if (!act->same_thread(prev)&&prev->is_failed_trylock())
406 if (!act->same_thread(prev)&&prev->is_notify())
412 case ATOMIC_NOTIFY_ALL:
413 case ATOMIC_NOTIFY_ONE: {
414 /* linear search: from most recent to oldest */
/* A notify conflicts with another thread's wait on the same condvar */
415 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
416 action_list_t::reverse_iterator rit;
417 for (rit = list->rbegin(); rit != list->rend(); rit++) {
418 ModelAction *prev = *rit;
419 if (!act->same_thread(prev)&&prev->is_wait())
430 /** This method finds backtracking points where we should try to
431 * reorder the parameter ModelAction against.
433 * @param act The ModelAction to find backtracking points for.
435 void ModelChecker::set_backtracking(ModelAction *act)
437 Thread *t = get_thread(act);
438 ModelAction * prev = get_last_conflict(act);
442 Node * node = prev->get_node()->get_parent();
444 int low_tid, high_tid;
/* If act's own thread was enabled at the conflict, only its tid needs
 * backtracking; otherwise consider every thread id */
445 if (node->is_enabled(t)) {
446 low_tid = id_to_int(act->get_tid());
447 high_tid = low_tid+1;
450 high_tid = get_num_threads();
453 for(int i = low_tid; i < high_tid; i++) {
454 thread_id_t tid = int_to_id(i);
456 /* Make sure this thread can be enabled here. */
457 if (i >= node->get_num_threads())
460 /* Don't backtrack into a point where the thread is disabled or sleeping. */
461 if (node->enabled_status(tid)!=THREAD_ENABLED)
464 /* Check if this has been explored already */
465 if (node->has_been_explored(tid))
468 /* See if fairness allows */
/* Under a fairness window, skip tid if some other enabled thread holds priority */
469 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
471 for(int t=0;t<node->get_num_threads();t++) {
472 thread_id_t tother=int_to_id(t);
473 if (node->is_enabled(tother) && node->has_priority(tother)) {
481 /* Cache the latest backtracking point */
482 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
483 priv->next_backtrack = prev;
485 /* If this is a new backtracking point, mark the tree */
486 if (!node->set_backtrack(tid))
488 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
489 id_to_int(prev->get_tid()),
490 id_to_int(t->get_id()));
499 * Returns last backtracking point. The model checker will explore a different
500 * path for this point in the next execution.
501 * @return The ModelAction at which the next execution should diverge.
503 ModelAction * ModelChecker::get_next_backtrack()
/* Consume (read-and-clear) the cached backtracking point */
505 ModelAction *next = priv->next_backtrack;
506 priv->next_backtrack = NULL;
511 * Processes a read or rmw model action.
512 * @param curr is the read model action to process.
513 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
514 * @return True if processing this read updates the mo_graph.
516 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
518 uint64_t value = VALUE_NONE;
519 bool updated = false;
/* The Node records which write this read should read from on this exploration */
521 const ModelAction *reads_from = curr->get_node()->get_read_from();
522 if (reads_from != NULL) {
/* Stage mo_graph edges so they can be rolled back if infeasible */
523 mo_graph->startChanges();
525 value = reads_from->get_value();
526 bool r_status = false;
528 if (!second_part_of_rmw) {
529 check_recency(curr, reads_from);
530 r_status = r_modification_order(curr, reads_from);
/* If this choice made the trace infeasible and another read-from / future
 * value remains, undo the staged edges and retry with the next choice */
534 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
535 mo_graph->rollbackChanges();
536 too_many_reads = false;
540 curr->read_from(reads_from);
541 mo_graph->commitChanges();
542 mo_check_promises(curr->get_tid(), reads_from);
545 } else if (!second_part_of_rmw) {
546 /* Read from future value */
/* No existing write chosen: speculate on a value some future write must
 * provide, recorded as a Promise to be resolved later */
547 value = curr->get_node()->get_future_value();
548 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
549 curr->read_from(NULL);
550 Promise *valuepromise = new Promise(curr, value, expiration);
551 promises->push_back(valuepromise);
/* Hand the read value back to the user thread */
553 get_thread(curr)->set_return_value(value);
559 * Processes a lock, trylock, or unlock model action. @param curr is
560 * the read model action to process.
562 * The try lock operation checks whether the lock is taken. If not,
563 * it falls to the normal lock operation case. If so, it returns
566 * The lock operation has already been checked that it is enabled, so
567 * it just grabs the lock and synchronizes with the previous unlock.
569 * The unlock operation has to re-enable all of the threads that are
570 * waiting on the lock.
572 * @return True if synchronization was updated; false otherwise
574 bool ModelChecker::process_mutex(ModelAction *curr) {
575 std::mutex *mutex=NULL;
576 struct std::mutex_state *state=NULL;
/* Locate the mutex: lock ops store it as the location; a wait stores the
 * condvar as the location and the mutex as the action's value */
578 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
579 mutex = (std::mutex *)curr->get_location();
580 state = mutex->get_state();
581 } else if(curr->is_wait()) {
582 mutex = (std::mutex *)curr->get_value();
583 state = mutex->get_state();
586 switch (curr->get_type()) {
587 case ATOMIC_TRYLOCK: {
588 bool success = !state->islocked;
589 curr->set_try_lock(success);
/* Failed trylock returns 0 to the caller */
591 get_thread(curr)->set_return_value(0);
594 get_thread(curr)->set_return_value(1);
596 //otherwise fall into the lock case
/* Detect use of a mutex before its construction was observed */
598 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
599 printf("Lock access before initialization\n");
602 state->islocked = true;
603 ModelAction *unlock = get_last_unlock(curr);
604 //synchronize with the previous unlock statement
605 if (unlock != NULL) {
606 curr->synchronize_with(unlock);
611 case ATOMIC_UNLOCK: {
613 state->islocked = false;
614 //wake up the other threads
615 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
616 //activate all the waiting threads
617 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
618 scheduler->wake(get_thread(*rit));
/* ATOMIC_WAIT: releases the mutex (like unlock), wakes its lock-waiters... */
625 state->islocked = false;
626 //wake up the other threads
627 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
628 //activate all the waiting threads
629 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
630 scheduler->wake(get_thread(*rit));
633 //check whether we should go to sleep or not...simulate spurious failures
/* misc index 0 => actually block on the condvar; other values model a
 * spurious wakeup (the thread does not sleep) */
634 if (curr->get_node()->get_misc()==0) {
635 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
637 scheduler->sleep(get_current_thread());
641 case ATOMIC_NOTIFY_ALL: {
642 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
643 //activate all the waiting threads
644 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
645 scheduler->wake(get_thread(*rit));
650 case ATOMIC_NOTIFY_ONE: {
/* The misc index selects WHICH waiter gets woken, so all choices are explored */
651 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
652 int wakeupthread=curr->get_node()->get_misc();
653 action_list_t::iterator it = waiters->begin();
654 advance(it, wakeupthread);
655 scheduler->wake(get_thread(*it));
667 * Process a write ModelAction
668 * @param curr The ModelAction to process
669 * @return True if the mo_graph was updated or promises were resolved
671 bool ModelChecker::process_write(ModelAction *curr)
673 bool updated_mod_order = w_modification_order(curr);
674 bool updated_promises = resolve_promises(curr);
/* Once no promises remain outstanding, revisit deferred future values:
 * with a more complete mo_graph we can now decide which are plausible */
676 if (promises->size() == 0) {
677 for (unsigned int i = 0; i < futurevalues->size(); i++) {
678 struct PendingFutureValue pfv = (*futurevalues)[i];
679 //Do more ambitious checks now that mo is more complete
680 if (mo_may_allow(pfv.writer, pfv.act)&&
681 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
682 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
683 priv->next_backtrack = pfv.act;
/* All pending future values have been considered; drop them */
685 futurevalues->resize(0);
688 mo_graph->commitChanges();
689 mo_check_promises(curr->get_tid(), curr);
/* Writes have no meaningful return value for the user thread */
691 get_thread(curr)->set_return_value(VALUE_NONE);
692 return updated_mod_order || updated_promises;
696 * @brief Process the current action for thread-related activity
698 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
699 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
700 * synchronization, etc. This function is a no-op for non-THREAD actions
701 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
703 * @param curr The current action
704 * @return True if synchronization was updated or a thread completed
706 bool ModelChecker::process_thread_action(ModelAction *curr)
708 bool updated = false;
710 switch (curr->get_type()) {
711 case THREAD_CREATE: {
/* THREAD_CREATE carries the created Thread as its location */
712 Thread *th = (Thread *)curr->get_location();
713 th->set_creation(curr);
/* THREAD_JOIN: synchronize with the joined thread's last action */
717 Thread *blocking = (Thread *)curr->get_location();
718 ModelAction *act = get_last_action(blocking->get_id());
719 curr->synchronize_with(act);
720 updated = true; /* trigger rel-seq checks */
723 case THREAD_FINISH: {
/* Wake every thread blocked joining on this one */
724 Thread *th = get_thread(curr);
725 while (!th->wait_list_empty()) {
726 ModelAction *act = th->pop_wait_list();
727 scheduler->wake(get_thread(act));
730 updated = true; /* trigger rel-seq checks */
/* THREAD_START (presumably): check promises against the new thread's clock */
734 check_promises(curr->get_tid(), NULL, curr->get_cv());
745 * @brief Process the current action for release sequence fixup activity
747 * Performs model-checker release sequence fixups for the current action,
748 * forcing a single pending release sequence to break (with a given, potential
749 * "loose" write) or to complete (i.e., synchronize). If a pending release
750 * sequence forms a complete release sequence, then we must perform the fixup
751 * synchronization, mo_graph additions, etc.
753 * @param curr The current action; must be a release sequence fixup action
754 * @param work_queue The work queue to which to add work items as they are
757 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The Node's relseq_break records which write (if any) breaks the sequence */
759 const ModelAction *write = curr->get_node()->get_relseq_break();
760 struct release_seq *sequence = pending_rel_seqs->back();
761 pending_rel_seqs->pop_back();
763 ModelAction *acquire = sequence->acquire;
764 const ModelAction *rf = sequence->rf;
765 const ModelAction *release = sequence->release;
769 ASSERT(release->same_thread(rf));
773 * @todo Forcing a synchronization requires that we set
774 * modification order constraints. For instance, we can't allow
775 * a fixup sequence in which two separate read-acquire
776 * operations read from the same sequence, where the first one
777 * synchronizes and the other doesn't. Essentially, we can't
778 * allow any writes to insert themselves between 'release' and
782 /* Must synchronize */
783 if (!acquire->synchronize_with(release)) {
784 set_bad_synchronization();
787 /* Re-check all pending release sequences */
788 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
789 /* Re-check act for mo_graph edges */
790 work_queue->push_back(MOEdgeWorkEntry(acquire));
792 /* propagate synchronization to later actions */
/* Walk the trace backwards from the end down to 'acquire', re-synchronizing
 * every action that happens-after the acquire */
793 action_list_t::reverse_iterator rit = action_trace->rbegin();
794 for (; (*rit) != acquire; rit++) {
795 ModelAction *propagate = *rit;
796 if (acquire->happens_before(propagate)) {
797 propagate->synchronize_with(acquire);
798 /* Re-check 'propagate' for mo_graph edges */
799 work_queue->push_back(MOEdgeWorkEntry(propagate));
803 /* Break release sequence with new edges:
804 * release --mo--> write --mo--> rf */
805 mo_graph->addEdge(release, write);
806 mo_graph->addEdge(write, rf);
809 /* See if we have realized a data race */
810 if (checkDataRaces())
815 * Initialize the current action by performing one or more of the following
816 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
817 * in the NodeStack, manipulating backtracking sets, allocating and
818 * initializing clock vectors, and computing the promises to fulfill.
820 * @param curr The current action, as passed from the user context; may be
821 * freed/invalidated after the execution of this function, with a different
822 * action "returned" its place (pass-by-reference)
823 * @return True if curr is a newly-explored action; false otherwise
825 bool ModelChecker::initialize_curr_action(ModelAction **curr)
827 ModelAction *newcurr;
/* Second half of a RMW: merge into the earlier RMWR action */
829 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
830 newcurr = process_rmw(*curr);
833 if (newcurr->is_rmw())
834 compute_promises(newcurr);
/* Fresh (non-RMW-merge) action: stamp it and step the NodeStack */
840 (*curr)->set_seq_number(get_next_seq_num());
842 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
844 /* First restore type and order in case of RMW operation */
845 if ((*curr)->is_rmwr())
846 newcurr->copy_typeandorder(*curr);
848 ASSERT((*curr)->get_location() == newcurr->get_location());
849 newcurr->copy_from_new(*curr);
851 /* Discard duplicate ModelAction; use action from NodeStack */
854 /* Always compute new clock vector */
855 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
858 return false; /* Action was explored previously */
862 /* Always compute new clock vector */
863 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
865 * Perform one-time actions when pushing new ModelAction onto
868 if (newcurr->is_write())
869 compute_promises(newcurr);
870 else if (newcurr->is_relseq_fixup())
871 compute_relseq_breakwrites(newcurr);
872 else if (newcurr->is_wait())
/* Two misc choices for a wait: block, or spurious wakeup (see process_mutex) */
873 newcurr->get_node()->set_misc_max(2);
874 else if (newcurr->is_notify_one()) {
/* One misc choice per condvar waiter: which thread gets woken */
875 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
877 return true; /* This was a new ModelAction */
882 * @brief Check whether a model action is enabled.
884 * Checks whether a lock or join operation would be successful (i.e., is the
885 * lock already locked, or is the joined thread already complete). If not, put
886 * the action in a waiter list.
888 * @param curr is the ModelAction to check whether it is enabled.
889 * @return a bool that indicates whether the action is enabled.
891 bool ModelChecker::check_action_enabled(ModelAction *curr) {
892 if (curr->is_lock()) {
893 std::mutex * lock = (std::mutex *)curr->get_location();
894 struct std::mutex_state * state = lock->get_state();
895 if (state->islocked) {
896 //Stick the action in the appropriate waiting queue
/* Woken later by the matching unlock (see process_mutex ATOMIC_UNLOCK) */
897 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
900 } else if (curr->get_type() == THREAD_JOIN) {
901 Thread *blocking = (Thread *)curr->get_location();
902 if (!blocking->is_complete()) {
/* Woken when the target thread executes THREAD_FINISH */
903 blocking->push_wait_list(curr);
912 * Stores the ModelAction for the current thread action. Call this
913 * immediately before switching from user- to system-context to pass
915 * @param act The ModelAction created by the user-thread action
917 void ModelChecker::set_current_action(ModelAction *act) {
918 priv->current_action = act;
922 * This is the heart of the model checker routine. It performs model-checking
923 * actions corresponding to a given "current action." Among other processes, it
924 * calculates reads-from relationships, updates synchronization clock vectors,
925 * forms a memory_order constraints graph, and handles replay/backtrack
926 * execution when running permutations of previously-observed executions.
928 * @param curr The current action to process
929 * @return The next Thread that must be executed. May be NULL if ModelChecker
930 * makes no choice (e.g., according to replay execution, combining RMW actions,
933 Thread * ModelChecker::check_current_action(ModelAction *curr)
936 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
938 if (!check_action_enabled(curr)) {
939 /* Make the execution look like we chose to run this action
940 * much later, when a lock/join can succeed */
941 get_current_thread()->set_pending(curr);
942 scheduler->sleep(get_current_thread());
943 return get_next_thread(NULL);
946 bool newly_explored = initialize_curr_action(&curr);
948 wake_up_sleeping_actions(curr);
950 /* Add the action to lists before any other model-checking tasks */
951 if (!second_part_of_rmw)
952 add_action_to_lists(curr);
954 /* Build may_read_from set for newly-created actions */
955 if (newly_explored && curr->is_read())
956 build_reads_from_past(curr);
958 /* Initialize work_queue with the "current action" work */
/* Work-queue fixpoint: processing one item may enqueue follow-up checks
 * (release sequences, mo_graph edges) until nothing remains */
959 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
960 while (!work_queue.empty()) {
961 WorkQueueEntry work = work_queue.front();
962 work_queue.pop_front();
965 case WORK_CHECK_CURR_ACTION: {
966 ModelAction *act = work.action;
967 bool update = false; /* update this location's release seq's */
968 bool update_all = false; /* update all release seq's */
970 if (process_thread_action(curr))
973 if (act->is_read() && process_read(act, second_part_of_rmw))
976 if (act->is_write() && process_write(act))
979 if (act->is_mutex_op() && process_mutex(act))
982 if (act->is_relseq_fixup())
983 process_relseq_fixup(curr, &work_queue)
986 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
988 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
991 case WORK_CHECK_RELEASE_SEQ:
992 resolve_release_sequences(work.location, &work_queue);
994 case WORK_CHECK_MO_EDGES: {
995 /** @todo Complete verification of work_queue */
996 ModelAction *act = work.action;
997 bool updated = false;
999 if (act->is_read()) {
1000 const ModelAction *rf = act->get_reads_from();
1001 if (rf != NULL && r_modification_order(act, rf))
1004 if (act->is_write()) {
1005 if (w_modification_order(act))
1008 mo_graph->commitChanges();
1011 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* All model-checking work done: record backtracking info and pick next thread */
1020 check_curr_backtracking(curr);
1021 set_backtracking(curr);
1022 return get_next_thread(curr);
/**
 * If the current action still has unexplored alternatives (parent backtrack
 * set, misc index, read-from, future value, promise set, or relseq break)
 * and it is later than the cached candidate, make it the next backtracking
 * point for the following execution.
 */
1025 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1026 Node *currnode = curr->get_node();
1027 Node *parnode = currnode->get_parent();
1029 if ((!parnode->backtrack_empty() ||
1030 !currnode->misc_empty() ||
1031 !currnode->read_from_empty() ||
1032 !currnode->future_value_empty() ||
1033 !currnode->promise_empty() ||
1034 !currnode->relseq_break_empty())
1035 && (!priv->next_backtrack ||
1036 *curr > *priv->next_backtrack)) {
1037 priv->next_backtrack = curr;
/** @return true if any outstanding Promise's expiration clock has passed
 *  the current sequence number (the promised write can no longer appear) */
1041 bool ModelChecker::promises_expired() {
1042 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1043 Promise *promise = (*promises)[promise_index];
1044 if (promise->get_expiration()<priv->used_sequence_numbers) {
1051 /** @return whether the current partial trace must be a prefix of a
1052 * feasible trace. */
/* Stronger than isfeasible(): also requires no outstanding promises or
 * pending release sequences */
1053 bool ModelChecker::isfeasibleprefix() {
1054 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1057 /** @return whether the current partial trace is feasible. */
1058 bool ModelChecker::isfeasible() {
/* Debug-only diagnostic; the actual decision is the return expression below */
1059 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1060 DEBUG("Infeasible: RMW violation\n");
1062 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1065 /** @return whether the current partial trace is feasible other than
1066 * multiple RMW reading from the same store. */
1067 bool ModelChecker::isfeasibleotherthanRMW() {
/* Debug block only reports WHY the trace is infeasible; the decision itself
 * is the conjunction returned below */
1068 if (DBG_ENABLED()) {
1069 if (mo_graph->checkForCycles())
1070 DEBUG("Infeasible: modification order cycles\n");
1072 DEBUG("Infeasible: failed promise\n");
1074 DEBUG("Infeasible: too many reads\n");
1075 if (bad_synchronization)
1076 DEBUG("Infeasible: bad synchronization ordering\n");
1077 if (promises_expired())
1078 DEBUG("Infeasible: promises expired\n");
1080 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1083 /** Returns whether the current completed trace is feasible: the partial-trace
 * feasibility checks must pass AND every promised future value must have been
 * resolved by an actual write. */
1084 bool ModelChecker::isfinalfeasible() {
1085 if (DBG_ENABLED() && promises->size() != 0)
/* Fix typo in diagnostic: "unrevolved" -> "unresolved" */
1086 DEBUG("Infeasible: unresolved promises\n");
1088 return isfeasible() && promises->size() == 0;
1091 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1092 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
/* The RMWR half is this thread's most recent action; merge act into it */
1093 ModelAction *lastread = get_last_action(act->get_tid());
1094 lastread->process_rmw(act);
/* A completed RMW must be adjacent in mo to the write it read from */
1095 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1096 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1097 mo_graph->commitChanges();
1103 * Checks whether a thread has read from the same write for too many times
1104 * without seeing the effects of a later write.
1107 * 1) there must a different write that we could read from that would satisfy the modification order,
1108 * 2) we must have read from the same value in excess of maxreads times, and
1109 * 3) that other write must have been in the reads_from set for maxreads times.
1111 * If so, we decide that the execution is no longer feasible.
1113 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
/* maxreads == 0 disables the whole check */
1114 if (params.maxreads != 0) {
1116 if (curr->get_node()->get_read_from_size() <= 1)
1118 //Must make sure that execution is currently feasible... We could
1119 //accidentally clear by rolling back
1122 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1123 int tid = id_to_int(curr->get_tid());
/* No per-thread list for this tid yet => nothing to check */
1126 if ((int)thrd_lists->size() <= tid)
1128 action_list_t *list = &(*thrd_lists)[tid];
1130 action_list_t::reverse_iterator rit = list->rbegin();
1131 /* Skip past curr */
1132 for (; (*rit) != curr; rit++)
1134 /* go past curr now */
1137 action_list_t::reverse_iterator ritcopy = rit;
1138 //See if we have enough reads from the same value
/* Condition (2): the last maxreads actions of this thread at this location
 * must all be reads from the same write rf */
1140 for (; count < params.maxreads; rit++,count++) {
1141 if (rit==list->rend())
1143 ModelAction *act = *rit;
1144 if (!act->is_read())
1147 if (act->get_reads_from() != rf)
1149 if (act->get_node()->get_read_from_size() <= 1)
/* Condition (1): look for some OTHER write curr could feasibly read from */
1152 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1154 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1156 //Need a different write
1160 /* Test to see whether this is a feasible write to read from*/
/* Stage the mo edges, test feasibility, then always roll back */
1161 mo_graph->startChanges();
1162 r_modification_order(curr, write);
1163 bool feasiblereadfrom = isfeasible();
1164 mo_graph->rollbackChanges();
1166 if (!feasiblereadfrom)
/* Condition (3): that alternative write must have been available to every
 * one of the last maxreads reads as well */
1170 bool feasiblewrite = true;
1171 //now we need to see if this write works for everyone
1173 for (int loop = count; loop>0; loop--,rit++) {
1174 ModelAction *act=*rit;
1175 bool foundvalue = false;
1176 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1177 if (act->get_node()->get_read_from_at(j)==write) {
1183 feasiblewrite = false;
1187 if (feasiblewrite) {
/* All three conditions hold: declare the execution infeasible */
1188 too_many_reads = true;
1196 * Updates the mo_graph with the constraints imposed from the current
1199 * Basic idea is the following: Go through each other thread and find
1200 * the latest action that happened before our read. Two cases:
1202 * (1) The action is a write => that write must either occur before
1203 * the write we read from or be the write we read from.
1205 * (2) The action is a read => the write that that action read from
1206 * must occur before the write we read from or be the same write.
1208 * @param curr The current action. Must be a read.
1209 * @param rf The action that curr reads from. Must be a write.
1210 * @return True if modification order edges were added; false otherwise
1212 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1214 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1217 ASSERT(curr->is_read());
1219 /* Iterate over all threads */
1220 for (i = 0; i < thrd_lists->size(); i++) {
1221 /* Iterate over actions in thread, starting from most recent */
1222 action_list_t *list = &(*thrd_lists)[i];
1223 action_list_t::reverse_iterator rit;
1224 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1225 ModelAction *act = *rit;
1228 * Include at most one act per-thread that "happens
1229 * before" curr. Don't consider reflexively.
1231 if (act->happens_before(curr) && act != curr) {
1232 if (act->is_write()) {
1234 mo_graph->addEdge(act, rf);
1238 const ModelAction *prevreadfrom = act->get_reads_from();
1239 //if the previous read is unresolved, keep going...
1240 if (prevreadfrom == NULL)
1243 if (rf != prevreadfrom) {
1244 mo_graph->addEdge(prevreadfrom, rf);
1256 /** This method fixes up the modification order when we resolve a
1257 * promises. The basic problem is that actions that occur after the
1258 * read curr could not properly add items to the modification order
1261 * So for each thread, we find the earliest item that happens after
1262 * the read curr. This is the item we have to fix up with additional
1263 * constraints. If that action is write, we add a MO edge between
1264 * the Action rf and that action. If the action is a read, we add a
1265 * MO edge between the Action rf, and whatever the read accessed.
1267 * @param curr is the read ModelAction that we are fixing up MO edges for.
1268 * @param rf is the write ModelAction that curr reads from.
1271 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1273 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1275 ASSERT(curr->is_read());
1277 /* Iterate over all threads */
1278 for (i = 0; i < thrd_lists->size(); i++) {
1279 /* Iterate over actions in thread, starting from most recent */
1280 action_list_t *list = &(*thrd_lists)[i];
1281 action_list_t::reverse_iterator rit;
1282 ModelAction *lastact = NULL;
1284 /* Find last action that happens after curr that is either not curr or a rmw */
1285 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1286 ModelAction *act = *rit;
1287 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1293 /* Include at most one act per-thread that "happens before" curr */
1294 if (lastact != NULL) {
1295 if (lastact==curr) {
1296 //Case 1: The resolved read is a RMW, and we need to make sure
1297 //that the write portion of the RMW mod order after rf
1299 mo_graph->addEdge(rf, lastact);
1300 } else if (lastact->is_read()) {
1301 //Case 2: The resolved read is a normal read and the next
1302 //operation is a read, and we need to make sure the value read
1303 //is mod ordered after rf
1305 const ModelAction *postreadfrom = lastact->get_reads_from();
1306 if (postreadfrom != NULL&&rf != postreadfrom)
1307 mo_graph->addEdge(rf, postreadfrom);
1309 //Case 3: The resolved read is a normal read and the next
1310 //operation is a write, and we need to make sure that the
1311 //write is mod ordered after rf
1313 mo_graph->addEdge(rf, lastact);
1321 * Updates the mo_graph with the constraints imposed from the current write.
1323 * Basic idea is the following: Go through each other thread and find
1324 * the latest action that happened before our write. Two cases:
1326 * (1) The action is a write => that write must occur before
1329 * (2) The action is a read => the write that that action read from
1330 * must occur before the current write.
1332 * This method also handles two other issues:
1334 * (I) Sequential Consistency: Making sure that if the current write is
1335 * seq_cst, that it occurs after the previous seq_cst write.
1337 * (II) Sending the write back to non-synchronizing reads.
1339 * @param curr The current action. Must be a write.
1340 * @return True if modification order edges were added; false otherwise
// NOTE(review): numbering gaps indicate dropped brace/continue/return lines;
// visible code is kept byte-identical. Do not assume the control flow shown
// here is complete.
1342 bool ModelChecker::w_modification_order(ModelAction *curr)
// Per-thread lists of actions at curr's location.
1344 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location())
1347 ASSERT(curr->is_write());
// (I) seq_cst writes are totally ordered: edge from previous seq_cst write.
1349 if (curr->is_seqcst()) {
1350 /* We have to at least see the last sequentially consistent write,
1351 so we are initialized. */
1352 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1353 if (last_seq_cst != NULL) {
1354 mo_graph->addEdge(last_seq_cst, curr);
1359 /* Iterate over all threads */
1360 for (i = 0; i < thrd_lists->size(); i++) {
1361 /* Iterate over actions in thread, starting from most recent */
1362 action_list_t *list = &(*thrd_lists)[i];
1363 action_list_t::reverse_iterator rit;
1364 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1365 ModelAction *act = *rit;
1368 * 1) If RMW and it actually read from something, then we
1369 * already have all relevant edges, so just skip to next
1372 * 2) If RMW and it didn't read from anything, we should
1373 * whatever edge we can get to speed up convergence.
1375 * 3) If normal write, we need to look at earlier actions, so
1376 * continue processing list.
1378 if (curr->is_rmw()) {
1379 if (curr->get_reads_from()!=NULL)
1388 * Include at most one act per-thread that "happens
1391 if (act->happens_before(curr)) {
1393 * Note: if act is RMW, just add edge:
1395 * The following edge should be handled elsewhere:
1396 * readfrom(act) --mo--> act
// Earlier write (or the write an earlier read saw) is mo-before curr.
1398 if (act->is_write())
1399 mo_graph->addEdge(act, curr);
1400 else if (act->is_read()) {
1401 //if previous read accessed a null, just keep going
1402 if (act->get_reads_from() == NULL)
1404 mo_graph->addEdge(act->get_reads_from(), curr);
// (II) a later, non-synchronizing read in another thread may be able to
// read this write "from the future": stash it as a pending future value.
1408 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1409 !act->same_thread(curr)) {
1410 /* We have an action that:
1411 (1) did not happen before us
1412 (2) is a read and we are a write
1413 (3) cannot synchronize with us
1414 (4) is in a different thread
1416 that read could potentially read from our write. Note that
1417 these checks are overly conservative at this point, we'll
1418 do more checks before actually removing the
1422 if (thin_air_constraint_may_allow(curr, act)) {
1424 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1425 struct PendingFutureValue pfv = {curr,act};
1426 futurevalues->push_back(pfv);
1436 /** Arbitrary reads from the future are not allowed. Section 29.3
1437 * part 9 places some constraints. This method checks one result of that
1438 * constraint. Others require compiler support. */
1439 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1440 if (!writer->is_rmw())
1443 if (!reader->is_rmw())
1446 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1447 if (search == reader)
1449 if (search->get_tid() == reader->get_tid() &&
1450 search->happens_before(reader))
1458 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1459 * some constraints. This method checks one of the following constraints (others
1460 * require compiler support):
1462 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1464 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1466 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1468 /* Iterate over all threads */
1469 for (i = 0; i < thrd_lists->size(); i++) {
1470 const ModelAction *write_after_read = NULL;
1472 /* Iterate over actions in thread, starting from most recent */
1473 action_list_t *list = &(*thrd_lists)[i];
1474 action_list_t::reverse_iterator rit;
1475 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1476 ModelAction *act = *rit;
1478 if (!reader->happens_before(act))
1480 else if (act->is_write())
1481 write_after_read = act;
1482 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1483 write_after_read = act->get_reads_from();
1487 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1494 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1495 * The ModelAction under consideration is expected to be taking part in
1496 * release/acquire synchronization as an object of the "reads from" relation.
1497 * Note that this can only provide release sequence support for RMW chains
1498 * which do not read from the future, as those actions cannot be traced until
1499 * their "promise" is fulfilled. Similarly, we may not even establish the
1500 * presence of a release sequence with certainty, as some modification order
1501 * constraints may be decided further in the future. Thus, this function
1502 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1503 * and a boolean representing certainty.
1505 * @param rf The action that might be part of a release sequence. Must be a
1507 * @param release_heads A pass-by-reference style return parameter. After
1508 * execution of this function, release_heads will contain the heads of all the
1509 * relevant release sequences, if any exists with certainty
1510 * @param pending A pass-by-reference style return parameter which is only used
1511 * when returning false (i.e., uncertain). Returns most information regarding
1512 * an uncertain release sequence, including any write operations that might
1513 * break the sequence.
1514 * @return true, if the ModelChecker is certain that release_heads is complete;
// NOTE(review): this listing has dropped lines throughout (see numbering
// gaps) — loop headers, braces, continues and returns are missing. Lines
// below are byte-identical to the extraction; comments only are added.
1517 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1518 rel_heads_list_t *release_heads,
1519 struct release_seq *pending) const
1521 /* Only check for release sequences if there are no cycles */
1522 if (mo_graph->checkForCycles())
1526 ASSERT(rf->is_write());
// Phase 1 (presumably a loop over the RMW reads-from chain — header line
// dropped): collect release heads along the chain.
1528 if (rf->is_release())
1529 release_heads->push_back(rf);
1531 break; /* End of RMW chain */
1533 /** @todo Need to be smarter here... In the linux lock
1534 * example, this will run to the beginning of the program for
1536 /** @todo The way to be smarter here is to keep going until 1
1537 * thread has a release preceded by an acquire and you've seen
1540 /* acq_rel RMW is a sufficient stopping condition */
1541 if (rf->is_acquire() && rf->is_release())
1542 return true; /* complete */
1544 rf = rf->get_reads_from();
1547 /* read from future: need to settle this later */
1549 return false; /* incomplete */
1552 if (rf->is_release())
1553 return true; /* complete */
1555 /* else relaxed write; check modification order for contiguous subsequence
1556 * -> rf must be same thread as release */
1557 int tid = id_to_int(rf->get_tid());
1558 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1559 action_list_t *list = &(*thrd_lists)[tid];
1560 action_list_t::const_reverse_iterator rit;
1562 /* Find rf in the thread list */
1563 rit = std::find(list->rbegin(), list->rend(), rf);
1564 ASSERT(rit != list->rend());
1566 /* Find the last write/release */
1567 for (; rit != list->rend(); rit++)
1568 if ((*rit)->is_release())
1570 if (rit == list->rend()) {
1571 /* No write-release in this thread */
1572 return true; /* complete */
1574 ModelAction *release = *rit;
1576 ASSERT(rf->same_thread(release));
// Reset the set of writes that might break this (pending) sequence.
1578 pending->writes.clear();
// Phase 2: for every other thread, check whether any of its non-RMW writes
// could slip between 'release' and 'rf' in modification order.
1580 bool certain = true;
1581 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1582 if (id_to_int(rf->get_tid()) == (int)i)
1584 list = &(*thrd_lists)[i];
1586 /* Can we ensure no future writes from this thread may break
1587 * the release seq? */
1588 bool future_ordered = false;
1590 ModelAction *last = get_last_action(int_to_id(i));
1591 Thread *th = get_thread(int_to_id(i));
1592 if ((last && rf->happens_before(last)) ||
1595 future_ordered = true;
1597 ASSERT(!th->is_model_thread() || future_ordered);
1599 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1600 const ModelAction *act = *rit;
1601 /* Reach synchronization -> this thread is complete */
1602 if (act->happens_before(release))
1604 if (rf->happens_before(act)) {
1605 future_ordered = true;
1609 /* Only non-RMW writes can break release sequences */
1610 if (!act->is_write() || act->is_rmw())
1613 /* Check modification order */
1614 if (mo_graph->checkReachable(rf, act)) {
1615 /* rf --mo--> act */
1616 future_ordered = true;
1619 if (mo_graph->checkReachable(act, release))
1620 /* act --mo--> release */
1622 if (mo_graph->checkReachable(release, act) &&
1623 mo_graph->checkReachable(act, rf)) {
1624 /* release --mo-> act --mo--> rf */
1625 return true; /* complete */
1627 /* act may break release sequence */
1628 pending->writes.push_back(act);
1631 if (!future_ordered)
1632 certain = false; /* This thread is uncertain */
// Presumably guarded by 'certain' (guard line dropped): publish the head,
// or else record the pending (uncertain) sequence for lazy re-checking.
1636 release_heads->push_back(release);
1637 pending->writes.clear();
1639 pending->release = release;
1646 * A public interface for getting the release sequence head(s) with which a
1647 * given ModelAction must synchronize. This function only returns a non-empty
1648 * result when it can locate a release sequence head with certainty. Otherwise,
1649 * it may mark the internal state of the ModelChecker so that it will handle
1650 * the release sequence at a later time, causing @a act to update its
1651 * synchronization at some later point in execution.
1652 * @param act The 'acquire' action that may read from a release sequence
1653 * @param release_heads A pass-by-reference return parameter. Will be filled
1654 * with the head(s) of the release sequence(s), if they exists with certainty.
1655 * @see ModelChecker::release_seq_heads
1657 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1659 const ModelAction *rf = act->get_reads_from();
1660 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1661 sequence->acquire = act;
1663 if (!release_seq_heads(rf, release_heads, sequence)) {
1664 /* add act to 'lazy checking' list */
1665 pending_rel_seqs->push_back(sequence);
1667 snapshot_free(sequence);
1672 * Attempt to resolve all stashed operations that might synchronize with a
1673 * release sequence for a given location. This implements the "lazy" portion of
1674 * determining whether or not a release sequence was contiguous, since not all
1675 * modification order information is present at the time an action occurs.
1677 * @param location The location/object that should be checked for release
1678 * sequence resolutions. A NULL value means to check all locations.
1679 * @param work_queue The work queue to which to add work items as they are
1681 * @return True if any updates occurred (new synchronization, new mo_graph
// NOTE(review): numbering gaps show dropped lines (braces, 'it++/continue',
// the 'complete' declaration, 'updated = true', the updated-guard, and the
// return). Visible lines are byte-identical; comments only are added.
1684 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1686 bool updated = false;
1687 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1688 while (it != pending_rel_seqs->end()) {
1689 struct release_seq *pending = *it;
1690 ModelAction *act = pending->acquire;
1692 /* Only resolve sequences on the given location, if provided */
1693 if (location && act->get_location() != location) {
1698 const ModelAction *rf = act->get_reads_from();
1699 rel_heads_list_t release_heads;
// Re-run the release-sequence analysis now that more mo info is known.
1701 complete = release_seq_heads(rf, &release_heads, pending);
1702 for (unsigned int i = 0; i < release_heads.size(); i++) {
1703 if (!act->has_synchronized_with(release_heads[i])) {
1704 if (act->synchronize_with(release_heads[i]))
// synchronize_with failed => synchronization at a later point than an
// earlier established one; flag the execution.
1707 set_bad_synchronization();
// New synchronization discovered: queue follow-up work items.
1712 /* Re-check all pending release sequences */
1713 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1714 /* Re-check act for mo_graph edges */
1715 work_queue->push_back(MOEdgeWorkEntry(act));
1717 /* propagate synchronization to later actions */
1718 action_list_t::reverse_iterator rit = action_trace->rbegin();
1719 for (; (*rit) != act; rit++) {
1720 ModelAction *propagate = *rit;
1721 if (act->happens_before(propagate)) {
1722 propagate->synchronize_with(act);
1723 /* Re-check 'propagate' for mo_graph edges */
1724 work_queue->push_back(MOEdgeWorkEntry(propagate));
// Certain ('complete') sequences are retired from the pending list and
// their snapshot-heap record freed.
1729 it = pending_rel_seqs->erase(it);
1730 snapshot_free(pending);
1736 // If we resolved promises or data races, see if we have realized a data race.
1737 if (checkDataRaces()) {
1745 * Performs various bookkeeping operations for the current ModelAction. For
1746 * instance, adds action to the per-object, per-thread action vector and to the
1747 * action trace list of all thread actions.
1749 * @param act is the ModelAction to add.
1751 void ModelChecker::add_action_to_lists(ModelAction *act)
1753 int tid = id_to_int(act->get_tid());
// Global trace list of all actions, in execution order.
1754 action_trace->push_back(act);
// Per-object list for act's location.
1756 get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
// Per-object, per-thread list.
1758 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1759 if (tid >= (int)vec->size())
// NOTE(review): grown to next_thread_id here but to get_num_threads()
// below for thrd_last_action — confirm these are intentionally different.
1760 vec->resize(priv->next_thread_id);
1761 (*vec)[tid].push_back(act);
// Track the most recent action of each thread.
1763 if ((int)thrd_last_action->size() <= tid)
1764 thrd_last_action->resize(get_num_threads());
1765 (*thrd_last_action)[tid] = act;
// A wait also operates on its mutex: mirror the bookkeeping above under the
// mutex location (act's value holds the mutex address).
1767 if (act->is_wait()) {
1768 void *mutex_loc=(void *) act->get_value();
1769 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1771 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1772 if (tid >= (int)vec->size())
1773 vec->resize(priv->next_thread_id);
1774 (*vec)[tid].push_back(act);
1776 if ((int)thrd_last_action->size() <= tid)
1777 thrd_last_action->resize(get_num_threads());
1778 (*thrd_last_action)[tid] = act;
1783 * @brief Get the last action performed by a particular Thread
1784 * @param tid The thread ID of the Thread in question
1785 * @return The last action in the thread
1787 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1789 int threadid = id_to_int(tid);
1790 if (threadid < (int)thrd_last_action->size())
1791 return (*thrd_last_action)[id_to_int(tid)];
1797 * Gets the last memory_order_seq_cst write (in the total global sequence)
1798 * performed on a particular object (i.e., memory location), not including the
1800 * @param curr The current ModelAction; also denotes the object location to
1802 * @return The last seq_cst write
1804 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1806 void *location = curr->get_location();
1807 action_list_t *list = get_safe_ptr_action(obj_map, location);
1808 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1809 action_list_t::reverse_iterator rit;
1810 for (rit = list->rbegin(); rit != list->rend(); rit++)
1811 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1817 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1818 * location). This function identifies the mutex according to the current
1819 * action, which is presumed to perform on the same mutex.
1820 * @param curr The current ModelAction; also denotes the object location to
1822 * @return The last unlock operation
1824 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1826 void *location = curr->get_location();
1827 action_list_t *list = get_safe_ptr_action(obj_map, location);
1828 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1829 action_list_t::reverse_iterator rit;
1830 for (rit = list->rbegin(); rit != list->rend(); rit++)
1831 if ((*rit)->is_unlock() || (*rit)->is_wait())
1836 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1838 ModelAction *parent = get_last_action(tid);
1840 parent = get_thread(tid)->get_creation();
1845 * Returns the clock vector for a given thread.
1846 * @param tid The thread whose clock vector we want
1847 * @return Desired clock vector
1849 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1851 return get_parent_action(tid)->get_cv();
1855 * Resolve a set of Promises with a current write. The set is provided in the
1856 * Node corresponding to @a write.
1857 * @param write The ModelAction that is fulfilling Promises
1858 * @return True if promises were resolved; false otherwise
1860 bool ModelChecker::resolve_promises(ModelAction *write)
1862 bool resolved = false;
1863 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
1865 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1866 Promise *promise = (*promises)[promise_index];
1867 if (write->get_node()->get_promise(i)) {
1868 ModelAction *read = promise->get_action();
1869 if (read->is_rmw()) {
1870 mo_graph->addRMWEdge(write, read);
1872 read->read_from(write);
1873 //First fix up the modification order for actions that happened
1875 r_modification_order(read, write);
1876 //Next fix up the modification order for actions that happened
1878 post_r_modification_order(read, write);
1879 //Make sure the promise's value matches the write's value
1880 ASSERT(promise->get_value() == write->get_value());
1883 promises->erase(promises->begin() + promise_index);
1884 threads_to_check.push_back(read->get_tid());
1891 //Check whether reading these writes has made threads unable to
1894 for(unsigned int i=0;i<threads_to_check.size();i++)
1895 mo_check_promises(threads_to_check[i], write);
1901 * Compute the set of promises that could potentially be satisfied by this
1902 * action. Note that the set computation actually appears in the Node, not in
1904 * @param curr The ModelAction that may satisfy promises
1906 void ModelChecker::compute_promises(ModelAction *curr)
1908 for (unsigned int i = 0; i < promises->size(); i++) {
1909 Promise *promise = (*promises)[i];
1910 const ModelAction *act = promise->get_action();
1911 if (!act->happens_before(curr) &&
1913 !act->could_synchronize_with(curr) &&
1914 !act->same_thread(curr) &&
1915 act->get_location() == curr->get_location() &&
1916 promise->get_value() == curr->get_value()) {
1917 curr->get_node()->set_promise(i, act->is_rmw());
1922 /** Checks promises in response to change in ClockVector Threads. */
1923 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1925 for (unsigned int i = 0; i < promises->size(); i++) {
1926 Promise *promise = (*promises)[i];
1927 const ModelAction *act = promise->get_action();
1928 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1929 merge_cv->synchronized_since(act)) {
1930 if (promise->increment_threads(tid)) {
1931 //Promise has failed
1932 failed_promise = true;
1939 void ModelChecker::check_promises_thread_disabled() {
1940 for (unsigned int i = 0; i < promises->size(); i++) {
1941 Promise *promise = (*promises)[i];
1942 if (promise->check_promise()) {
1943 failed_promise = true;
1949 /** Checks promises in response to addition to modification order for threads.
1951 * pthread is the thread that performed the read that created the promise
1953 * pread is the read that created the promise
1955 * pwrite is either the first write to same location as pread by
1956 * pthread that is sequenced after pread or the value read by the
1957 * first read to the same location as pread by pthread that is
1958 * sequenced after pread..
1960 * 1. If tid=pthread, then we check what other threads are reachable
1961 * through the mode order starting with pwrite. Those threads cannot
1962 * perform a write that will resolve the promise due to modification
1963 * order constraints.
1965 * 2. If the tid is not pthread, we check whether pwrite can reach the
1966 * action write through the modification order. If so, that thread
1967 * cannot perform a future write that will resolve the promise due to
1968 * modification order constraints.
1970 * @param tid The thread that either read from the model action
1971 * write, or actually did the model action write.
1973 * @param write The ModelAction representing the relevant write.
1976 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1977 void * location = write->get_location();
1978 for (unsigned int i = 0; i < promises->size(); i++) {
1979 Promise *promise = (*promises)[i];
1980 const ModelAction *act = promise->get_action();
1982 //Is this promise on the same location?
1983 if ( act->get_location() != location )
1986 //same thread as the promise
1987 if ( act->get_tid()==tid ) {
1989 //do we have a pwrite for the promise, if not, set it
1990 if (promise->get_write() == NULL ) {
1991 promise->set_write(write);
1992 //The pwrite cannot happen before the promise
1993 if (write->happens_before(act) && (write != act)) {
1994 failed_promise = true;
1998 if (mo_graph->checkPromise(write, promise)) {
1999 failed_promise = true;
2004 //Don't do any lookups twice for the same thread
2005 if (promise->has_sync_thread(tid))
2008 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2009 if (promise->increment_threads(tid)) {
2010 failed_promise = true;
2018 * Compute the set of writes that may break the current pending release
2019 * sequence. This information is extracted from previous release sequence
2022 * @param curr The current ModelAction. Must be a release sequence fixup
2025 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2027 if (pending_rel_seqs->empty())
2030 struct release_seq *pending = pending_rel_seqs->back();
2031 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2032 const ModelAction *write = pending->writes[i];
2033 curr->get_node()->add_relseq_break(write);
2036 /* NULL means don't break the sequence; just synchronize */
2037 curr->get_node()->add_relseq_break(NULL);
2041 * Build up an initial set of all past writes that this 'read' action may read
2042 * from. This set is determined by the clock vector's "happens before"
2044 * @param curr is the current ModelAction that we are exploring; it must be a
// NOTE(review): numbering gaps mean brace/continue/break lines and the
// 'initialized = true' assignments were dropped from this listing; visible
// lines are kept byte-identical.
2047 void ModelChecker::build_reads_from_past(ModelAction *curr)
2049 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2051 ASSERT(curr->is_read());
2053 ModelAction *last_seq_cst = NULL;
2055 /* Track whether this object has been initialized */
2056 bool initialized = false;
2058 if (curr->is_seqcst()) {
2059 last_seq_cst = get_last_seq_cst(curr);
2060 /* We have to at least see the last sequentially consistent write,
2061 so we are initialized. */
2062 if (last_seq_cst != NULL)
2066 /* Iterate over all threads */
2067 for (i = 0; i < thrd_lists->size(); i++) {
2068 /* Iterate over actions in thread, starting from most recent */
2069 action_list_t *list = &(*thrd_lists)[i];
2070 action_list_t::reverse_iterator rit;
2071 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2072 ModelAction *act = *rit;
2074 /* Only consider 'write' actions */
2075 if (!act->is_write() || act == curr)
2078 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2079 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
// Sleeping reads may only see writes sleep_can_read_from() allows.
2080 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2081 DEBUG("Adding action to may_read_from:\n");
2082 if (DBG_ENABLED()) {
2086 curr->get_node()->add_read_from(act);
2090 /* Include at most one act per-thread that "happens before" curr */
2091 if (act->happens_before(curr)) {
// If no happens-before write was found, the atomic may be read while
// uninitialized — report it.
2099 /** @todo Need a more informative way of reporting errors. */
2100 printf("ERROR: may read from uninitialized atomic\n");
2104 if (DBG_ENABLED() || !initialized) {
2105 printf("Reached read action:\n");
2107 printf("Printing may_read_from\n");
2108 curr->get_node()->print_may_read_from();
2109 printf("End printing may_read_from\n");
2113 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2115 Node *prevnode=write->get_node()->get_parent();
2117 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2118 if (write->is_release()&&thread_sleep)
2120 if (!write->is_rmw()) {
2123 if (write->get_reads_from()==NULL)
2125 write=write->get_reads_from();
2129 static void print_list(action_list_t *list)
2131 action_list_t::iterator it;
2133 printf("---------------------------------------------------------------------\n");
2135 unsigned int hash=0;
2137 for (it = list->begin(); it != list->end(); it++) {
2139 hash=hash^(hash<<3)^((*it)->hash());
2141 printf("HASH %u\n", hash);
2142 printf("---------------------------------------------------------------------\n");
2145 #if SUPPORT_MOD_ORDER_DUMP
2146 void ModelChecker::dumpGraph(char *filename) {
2148 sprintf(buffer, "%s.dot",filename);
2149 FILE *file=fopen(buffer, "w");
2150 fprintf(file, "digraph %s {\n",filename);
2151 mo_graph->dumpNodes(file);
2152 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2154 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2155 ModelAction *action=*it;
2156 if (action->is_read()) {
2157 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2158 if (action->get_reads_from()!=NULL)
2159 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2161 if (thread_array[action->get_tid()] != NULL) {
2162 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2165 thread_array[action->get_tid()]=action;
2167 fprintf(file,"}\n");
2168 model_free(thread_array);
2173 void ModelChecker::print_summary()
2176 printf("Number of executions: %d\n", num_executions);
2177 printf("Number of feasible executions: %d\n", num_feasible_executions);
2178 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2180 #if SUPPORT_MOD_ORDER_DUMP
2182 char buffername[100];
2183 sprintf(buffername, "exec%04u", num_executions);
2184 mo_graph->dumpGraphToFile(buffername);
2185 sprintf(buffername, "graph%04u", num_executions);
2186 dumpGraph(buffername);
2189 if (!isfinalfeasible())
2190 printf("INFEASIBLE EXECUTION!\n");
2191 print_list(action_trace);
2196 * Add a Thread to the system for the first time. Should only be called once
2198 * @param t The Thread to add
2200 void ModelChecker::add_thread(Thread *t)
2202 thread_map->put(id_to_int(t->get_id()), t);
2203 scheduler->add_thread(t);
2207 * Removes a thread from the scheduler.
2208 * @param the thread to remove.
2210 void ModelChecker::remove_thread(Thread *t)
2212 scheduler->remove_thread(t);
2216 * @brief Get a Thread reference by its ID
2217 * @param tid The Thread's ID
2218 * @return A Thread reference
2220 Thread * ModelChecker::get_thread(thread_id_t tid) const
2222 return thread_map->get(id_to_int(tid));
2226 * @brief Get a reference to the Thread in which a ModelAction was executed
2227 * @param act The ModelAction
2228 * @return A Thread reference
2230 Thread * ModelChecker::get_thread(ModelAction *act) const
2232 return get_thread(act->get_tid());
2236 * @brief Check if a Thread is currently enabled
2237 * @param t The Thread to check
2238 * @return True if the Thread is currently enabled
2240 bool ModelChecker::is_enabled(Thread *t) const
2242 return scheduler->is_enabled(t);
2246 * @brief Check if a Thread is currently enabled
2247 * @param tid The ID of the Thread to check
2248 * @return True if the Thread is currently enabled
2250 bool ModelChecker::is_enabled(thread_id_t tid) const
2252 return scheduler->is_enabled(tid);
2256 * Switch from a user-context to the "master thread" context (a.k.a. system
2257 * context). This switch is made with the intention of exploring a particular
2258 * model-checking action (described by a ModelAction object). Must be called
2259 * from a user-thread context.
2261 * @param act The current action that will be explored. May be NULL only if
2262 * trace is exiting via an assertion (see ModelChecker::set_assert and
2263 * ModelChecker::has_asserted).
2264 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2266 int ModelChecker::switch_to_master(ModelAction *act)
/* Grab the currently-running user thread before switching away from it */
2269 Thread *old = thread_current();
/* Publish 'act' as the action for the master context to explore */
2270 set_current_action(act);
/* Mark this thread READY so the scheduler may resume it later */
2271 old->set_state(THREAD_READY);
/* Context-switch into the system (master) context; the swap's
 * success/failure status is returned directly to the caller */
2272 return Thread::swap(old, &system_context);
2276 * Takes the next step in the execution, if possible.
2277 * @return Returns true (success) if a step was taken and false otherwise.
2279 bool ModelChecker::take_step() {
/* Resolve the thread that issued the pending action, if any */
2283 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
/* NOTE(review): 'curr' may be NULL here (ternary above) yet is
 * dereferenced below — a 'curr != NULL' guard appears to be missing
 * from this listing; verify against the full source. */
2285 if (curr->get_state() == THREAD_READY) {
2286 ASSERT(priv->current_action);
/* Process the action; check_current_action() may nominate the next
 * thread to run via priv->nextThread */
2288 priv->nextThread = check_current_action(priv->current_action);
2289 priv->current_action = NULL;
/* A blocked or finished thread must leave the scheduler's run set */
2291 if (curr->is_blocked() || curr->is_complete())
2292 scheduler->remove_thread(curr);
/* Ask the scheduler which thread runs next (may honor nextThread) */
2297 Thread *next = scheduler->next_thread(priv->nextThread);
2299 /* Infeasible -> don't take any more steps */
/* Enforce the user-configured bound on execution length, if any */
2303 if (params.bound != 0) {
2304 if (priv->used_sequence_numbers > params.bound) {
2309 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2310 next ? id_to_int(next->get_id()) : -1);
2313 * Launch end-of-execution release sequence fixups only when there are:
2315 * (1) no more user threads to run (or when execution replay chooses
2316 * the 'model_thread')
2317 * (2) pending release sequences
2318 * (3) pending assertions (i.e., data races)
2319 * (4) no pending promises
2321 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2322 isfinalfeasible() && !unrealizedraces.empty()) {
/* NOTE(review): "seuqences" typo in this user-visible warning string;
 * left unchanged here since string content is runtime behavior. */
2323 printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2324 pending_rel_seqs->size());
/* Synthesize a special fixup action for the model thread to execute */
2325 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2326 std::memory_order_seq_cst, NULL, VALUE_NONE,
2328 set_current_action(fixup);
2332 /* next == NULL -> don't take any more steps */
2336 next->set_state(THREAD_RUNNING);
2338 if (next->get_pending() != NULL) {
2339 /* restart a pending action */
2340 set_current_action(next->get_pending());
2341 next->set_pending(NULL);
2342 next->set_state(THREAD_READY);
2346 /* Return false only if swap fails with an error */
2347 return (Thread::swap(&system_context, next) == 0);
2350 /** Runs the current execution until there are no more steps to take. */
2351 void ModelChecker::finish_execution() {
/* Drive the execution forward; take_step() returns false when no
 * further step can be taken (or a context swap fails) */
2354 while (take_step());