9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
/* Fragment of the bug_message record; its struct declaration line is not
 * visible in this excerpt. Stores a formatted " [BUG] <str>" message on the
 * snapshotting heap so reports survive snapshot rollback. */
22 bug_message(const char *str) {
23 const char *fmt = " [BUG] %s\n";
/* strlen(fmt) includes the two-byte "%s" placeholder that formatting
 * replaces, so this size also covers the terminating NUL byte. */
24 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
25 sprintf(msg, fmt, str);
/* Free the snapshot-heap buffer allocated in the constructor */
27 ~bug_message() { if (msg) snapshot_free(msg); }
/* Emit the stored bug message to stdout */
30 void print() { printf("%s", msg); }
34 * Structure for holding small ModelChecker members that should be snapshotted
36 struct model_snapshot_members {
37 ModelAction *current_action; /* action handed off from the user thread (see set_current_action) */
38 unsigned int next_thread_id; /* next unassigned thread id; also serves as the thread count */
39 modelclock_t used_sequence_numbers; /* highest sequence number handed out so far */
41 ModelAction *next_backtrack; /* latest cached backtracking point (see set_backtracking) */
42 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs; /* bugs reported during this execution */
45 /** @brief Constructor */
46 ModelChecker::ModelChecker(struct model_params params) :
47 /* Initialize default scheduler */
49 scheduler(new Scheduler()),
51 num_feasible_executions(0),
53 earliest_diverge(NULL),
/* Global trace of all actions, in execution order */
54 action_trace(new action_list_t()),
/* Per-thread and per-memory-location bookkeeping tables */
55 thread_map(new HashTable<int, Thread *, int>()),
56 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
57 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
58 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
59 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
/* Snapshot-allocated containers: promises, speculative future values,
 * pending release sequences, and each thread's most recent action */
60 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
61 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
62 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
63 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
64 node_stack(new NodeStack()),
65 mo_graph(new CycleGraph()),
/* Feasibility flags; reset at the start of each execution */
66 failed_promise(false),
67 too_many_reads(false),
69 bad_synchronization(false)
/* NOTE(review): some initializer lines appear elided in this excerpt */
71 /* Allocate this "size" on the snapshotting heap */
72 priv = (struct model_snapshot_members *)snapshot_calloc(1, sizeof(*priv));
73 /* First thread created will have id INITIAL_THREAD_ID */
74 priv->next_thread_id = INITIAL_THREAD_ID;
76 /* Initialize a model-checker thread, for special ModelActions */
77 model_thread = new Thread(get_next_id());
78 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
81 /** @brief Destructor */
82 ModelChecker::~ModelChecker()
/* Free every Thread object registered in the thread map */
84 for (unsigned int i = 0; i < get_num_threads(); i++)
85 delete thread_map->get(i);
/* NOTE(review): deletes of some containers appear elided in this excerpt */
90 delete lock_waiters_map;
91 delete condvar_waiters_map;
/* Promises are heap-allocated; free the elements before the container */
94 for (unsigned int i = 0; i < promises->size(); i++)
95 delete (*promises)[i];
98 delete pending_rel_seqs;
100 delete thrd_last_action;
/* Bug messages were allocated in assert_bug(); free them here */
105 for (unsigned int i = 0; i < priv->bugs.size(); i++)
106 delete priv->bugs[i];
111 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
112 action_list_t * tmp=hash->get(ptr);
114 tmp=new action_list_t();
120 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
121 std::vector<action_list_t> * tmp=hash->get(ptr);
123 tmp=new std::vector<action_list_t>();
130 * Restores user program to initial state and resets all model-checker data
133 void ModelChecker::reset_to_initial_state()
135 DEBUG("+++ Resetting to initial state +++\n");
/* Rewind the exploration stack to the beginning of the execution */
136 node_stack->reset_execution();
/* Clear per-execution infeasibility flags (see isfeasibleotherthanRMW) */
137 failed_promise = false;
138 too_many_reads = false;
139 bad_synchronization = false;
/* Roll the user program's memory back to the initial snapshot */
141 snapshotObject->backTrackBeforeStep(0);
144 /** @return a thread ID for a new Thread */
145 thread_id_t ModelChecker::get_next_id()
/* Post-increment: ids are handed out sequentially starting at INITIAL_THREAD_ID */
147 return priv->next_thread_id++;
150 /** @return the number of user threads created during this execution */
151 unsigned int ModelChecker::get_num_threads() const
/* next_thread_id doubles as the count of ids handed out so far */
153 return priv->next_thread_id;
156 /** @return The currently executing Thread. */
157 Thread * ModelChecker::get_current_thread()
/* Delegates to the scheduler, which tracks the running thread */
159 return scheduler->get_current_thread();
162 /** @return a sequence number for a new ModelAction */
163 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: sequence numbers start at 1; 0 is never handed out */
165 return ++priv->used_sequence_numbers;
/** @return the Node at the head of the exploration stack (the current node) */
168 Node * ModelChecker::get_curr_node() {
169 return node_stack->get_head();
173 * @brief Choose the next thread to execute.
175 * This function chooses the next thread that should execute. It can force the
176 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
177 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
178 * The model-checker may have no preference regarding the next thread (i.e.,
179 * when exploring a new execution ordering), in which case this will return
181 * @param curr The current ModelAction. This action might guide the choice of
183 * @return The next thread to run. If the model-checker has no preference, NULL.
185 Thread * ModelChecker::get_next_thread(ModelAction *curr)
/* NOTE(review): several lines (declarations/conditions) appear elided in
 * this excerpt; comments below describe only the visible logic. */
190 /* Do not split atomic actions. */
192 return thread_current();
193 /* The THREAD_CREATE action points to the created Thread */
194 else if (curr->get_type() == THREAD_CREATE)
195 return (Thread *)curr->get_location();
198 /* Have we completed exploring the preselected path? */
202 /* Else, we are trying to replay an execution */
203 ModelAction *next = node_stack->get_next()->get_action();
/* diverge marks the action where this replay must differ from the last run */
205 if (next == diverge) {
206 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
207 earliest_diverge=diverge;
209 Node *nextnode = next->get_node();
210 Node *prevnode = nextnode->get_parent();
211 scheduler->update_sleep_set(prevnode);
213 /* Reached divergence point */
/* Try alternatives in priority order: misc index, promise set, read-from
 * choice, future value, then release-sequence resolution. Each keeps the
 * same thread and discards the stale suffix of the exploration stack. */
214 if (nextnode->increment_misc()) {
215 /* The next node will try to satisfy a different misc_index values. */
216 tid = next->get_tid();
217 node_stack->pop_restofstack(2);
218 } else if (nextnode->increment_promise()) {
219 /* The next node will try to satisfy a different set of promises. */
220 tid = next->get_tid();
221 node_stack->pop_restofstack(2);
222 } else if (nextnode->increment_read_from()) {
223 /* The next node will read from a different value. */
224 tid = next->get_tid();
225 node_stack->pop_restofstack(2);
226 } else if (nextnode->increment_future_value()) {
227 /* The next node will try to read from a different future value. */
228 tid = next->get_tid();
229 node_stack->pop_restofstack(2);
230 } else if (nextnode->increment_relseq_break()) {
231 /* The next node will try to resolve a release sequence differently */
232 tid = next->get_tid();
233 node_stack->pop_restofstack(2);
/* No per-node alternative remains: backtrack to a different thread */
235 /* Make a different thread execute for next step */
236 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
237 tid = prevnode->get_next_backtrack();
238 /* Make sure the backtracked thread isn't sleeping. */
239 node_stack->pop_restofstack(1);
240 if (diverge==earliest_diverge) {
241 earliest_diverge=prevnode->get_action();
244 /* The correct sleep set is in the parent node. */
247 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: keep replaying the recorded schedule */
251 tid = next->get_tid();
253 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
254 ASSERT(tid != THREAD_ID_T_NONE);
255 return thread_map->get(id_to_int(tid));
259 * We need to know what the next actions of all threads in the sleep
260 * set will be. This method computes them and stores the actions at
261 * the corresponding thread object's pending action.
264 void ModelChecker::execute_sleep_set() {
265 for(unsigned int i=0;i<get_num_threads();i++) {
266 thread_id_t tid=int_to_id(i);
267 Thread *thr=get_thread(tid);
/* Only probe sleeping threads whose next action is not yet known */
268 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
269 thr->get_pending() == NULL ) {
270 thr->set_state(THREAD_RUNNING);
271 scheduler->next_thread(thr);
/* Run the thread just long enough to produce its next ModelAction,
 * which the user context deposits in priv->current_action */
272 Thread::swap(&system_context, thr);
273 priv->current_action->set_sleep_flag();
274 thr->set_pending(priv->current_action);
/* Clear the handoff slot so a stale action is never consumed later */
277 priv->current_action = NULL;
/** Wake any sleeping thread whose pending action could synchronize with
 *  @a curr, since such a thread is no longer provably redundant to run. */
280 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
281 for(unsigned int i=0;i<get_num_threads();i++) {
282 thread_id_t tid=int_to_id(i);
283 Thread *thr=get_thread(tid);
284 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
285 ModelAction *pending_act=thr->get_pending();
/* RMW-read halves are skipped; only complete actions can wake sleepers */
286 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
287 //Remove this thread from sleep set
288 scheduler->remove_sleep(thr);
295 * Check if we are in a deadlock. Should only be called at the end of an
296 * execution, although it should not give false positives in the middle of an
297 * execution (there should be some ENABLED thread).
299 * @return True if program is in a deadlock; false otherwise
301 bool ModelChecker::is_deadlocked() const
303 bool blocking_threads = false;
304 for (unsigned int i = 0; i < get_num_threads(); i++) {
305 thread_id_t tid = int_to_id(i);
/* NOTE(review): an early-return/continue branch appears elided here */
308 Thread *t = get_thread(tid);
/* A user thread with a pending action is blocked waiting on something */
309 if (!t->is_model_thread() && t->get_pending())
310 blocking_threads = true;
312 return blocking_threads;
316 * Check if this is a complete execution. That is, have all thread completed
317 * execution (rather than exiting because sleep sets have forced a redundant
320 * @return True if the execution is complete.
322 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution stopped early */
324 for (unsigned int i = 0; i < get_num_threads(); i++)
325 if (is_enabled(int_to_id(i)))
331 * @brief Assert a bug in the executing program.
333 * Use this function to assert any sort of bug in the user program. If the
334 * current trace is feasible (actually, a prefix of some feasible execution),
335 * then this execution will be aborted, printing the appropriate message. If
336 * the current trace is not yet feasible, the error message will be stashed and
337 * printed if the execution ever becomes feasible.
339 * This function can also be used to immediately trigger the bug; that is, we
340 * don't wait for a feasible execution before aborting. Only use the
341 * "immediate" option when you know that the infeasibility is justified (e.g.,
342 * pending release sequences are not a problem)
344 * @param msg Descriptive message for the bug (do not include newline char)
345 * @param user_thread Was this assertion triggered from a user thread?
346 * @param immediate Should this bug be triggered immediately?
348 void ModelChecker::assert_bug(const char *msg, bool user_thread, bool immediate)
/* Always record the message; it is printed later via print_bugs() */
350 priv->bugs.push_back(new bug_message(msg));
352 if (immediate || isfeasibleprefix()) {
/* Yield back to the model-checker context to end this execution */
355 switch_to_master(NULL);
360 * @brief Assert a bug in the executing program, with a default message
361 * @see ModelChecker::assert_bug
362 * @param user_thread Was this assertion triggered from a user thread?
364 void ModelChecker::assert_bug(bool user_thread)
/* Delegates to the three-argument overload with a generic message */
366 assert_bug("bug detected", user_thread);
370 * @brief Assert a bug in the executing program immediately
371 * @see ModelChecker::assert_bug
372 * @param msg Descriptive message for the bug (do not include newline char)
374 void ModelChecker::assert_bug_immediate(const char *msg)
/* Report feasibility for diagnostics, then trigger the bug unconditionally */
376 printf("Feasible: %s\n", isfeasibleprefix() ? "yes" : "no");
377 assert_bug(msg, false, true);
380 /** @return True, if any bugs have been reported for this execution */
381 bool ModelChecker::have_bug_reports() const
383 return priv->bugs.size() != 0;
386 /** @brief Print bug report listing for this execution (if any bugs exist) */
387 void ModelChecker::print_bugs() const
389 if (have_bug_reports()) {
390 printf("Bug report: %zu bugs detected\n", priv->bugs.size());
/* Print each stashed bug_message in the order it was recorded */
391 for (unsigned int i = 0; i < priv->bugs.size(); i++)
392 priv->bugs[i]->print();
397 * Queries the model-checker for more executions to explore and, if one
398 * exists, resets the model-checker state to execute a new execution.
400 * @return If there are more executions to explore, return true. Otherwise,
403 bool ModelChecker::next_execution()
/* NOTE(review): several statements appear elided in this excerpt */
/* A feasible, complete (or buggy) trace counts as a finished execution */
409 if (isfinalfeasible() && (is_complete_execution() || have_bug_reports())) {
410 printf("Earliest divergence point since last feasible execution:\n");
411 if (earliest_diverge)
412 earliest_diverge->print();
414 printf("(Not set)\n");
416 earliest_diverge = NULL;
417 num_feasible_executions++;
/* Feasible but incomplete with no enabled thread => deadlock */
420 assert_bug("Deadlock detected");
425 } else if (DBG_ENABLED()) {
/* No remaining backtracking point means exploration is exhausted */
429 if ((diverge = get_next_backtrack()) == NULL)
433 printf("Next execution will diverge at:\n");
/* Roll back program and checker state for the next exploration */
437 reset_to_initial_state();
/** Find the most recent prior action that conflicts with @a act at the same
 *  location, used to decide where a backtracking point should be placed.
 *  The notion of "conflict" depends on the action type (see cases below).
 *  NOTE(review): some case labels and return statements appear elided in
 *  this excerpt. */
441 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
443 switch (act->get_type()) {
/* Reads/writes: conflict = any prior action we could synchronize with */
447 /* linear search: from most recent to oldest */
448 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
449 action_list_t::reverse_iterator rit;
450 for (rit = list->rbegin(); rit != list->rend(); rit++) {
451 ModelAction *prev = *rit;
452 if (prev->could_synchronize_with(act))
458 case ATOMIC_TRYLOCK: {
459 /* linear search: from most recent to oldest */
460 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
461 action_list_t::reverse_iterator rit;
462 for (rit = list->rbegin(); rit != list->rend(); rit++) {
463 ModelAction *prev = *rit;
/* Trylock conflicts with prior lock-related actions on this mutex */
464 if (act->is_conflicting_lock(prev))
469 case ATOMIC_UNLOCK: {
470 /* linear search: from most recent to oldest */
471 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
472 action_list_t::reverse_iterator rit;
473 for (rit = list->rbegin(); rit != list->rend(); rit++) {
474 ModelAction *prev = *rit;
/* Unlock conflicts with another thread's failed trylock */
475 if (!act->same_thread(prev)&&prev->is_failed_trylock())
/* (WAIT case, per the checks below) */
481 /* linear search: from most recent to oldest */
482 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
483 action_list_t::reverse_iterator rit;
484 for (rit = list->rbegin(); rit != list->rend(); rit++) {
485 ModelAction *prev = *rit;
486 if (!act->same_thread(prev)&&prev->is_failed_trylock())
488 if (!act->same_thread(prev)&&prev->is_notify())
494 case ATOMIC_NOTIFY_ALL:
495 case ATOMIC_NOTIFY_ONE: {
496 /* linear search: from most recent to oldest */
497 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
498 action_list_t::reverse_iterator rit;
499 for (rit = list->rbegin(); rit != list->rend(); rit++) {
500 ModelAction *prev = *rit;
/* Notify conflicts with another thread's wait on the same condvar */
501 if (!act->same_thread(prev)&&prev->is_wait())
512 /** This method finds backtracking points where we should try to
513 * reorder the parameter ModelAction against.
515 * @param the ModelAction to find backtracking points for.
517 void ModelChecker::set_backtracking(ModelAction *act)
519 Thread *t = get_thread(act);
520 ModelAction * prev = get_last_conflict(act);
/* NOTE(review): a NULL-check on prev appears elided in this excerpt */
/* Backtracking choices are recorded on the node *before* the conflict */
524 Node * node = prev->get_node()->get_parent();
526 int low_tid, high_tid;
/* If act's thread was enabled at the conflict, only that tid needs a
 * backtrack mark; otherwise consider every thread */
527 if (node->is_enabled(t)) {
528 low_tid = id_to_int(act->get_tid());
529 high_tid = low_tid+1;
532 high_tid = get_num_threads();
535 for(int i = low_tid; i < high_tid; i++) {
536 thread_id_t tid = int_to_id(i);
538 /* Make sure this thread can be enabled here. */
539 if (i >= node->get_num_threads())
542 /* Don't backtrack into a point where the thread is disabled or sleeping. */
543 if (node->enabled_status(tid)!=THREAD_ENABLED)
546 /* Check if this has been explored already */
547 if (node->has_been_explored(tid))
550 /* See if fairness allows */
551 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
/* Skip tid if some enabled, prioritized thread should run first */
553 for(int t=0;t<node->get_num_threads();t++) {
554 thread_id_t tother=int_to_id(t);
555 if (node->is_enabled(tother) && node->has_priority(tother)) {
563 /* Cache the latest backtracking point */
564 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
565 priv->next_backtrack = prev;
567 /* If this is a new backtracking point, mark the tree */
568 if (!node->set_backtrack(tid))
570 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
571 id_to_int(prev->get_tid()),
572 id_to_int(t->get_id()));
581 * Returns last backtracking point. The model checker will explore a different
582 * path for this point in the next execution.
583 * @return The ModelAction at which the next execution should diverge.
585 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: return it and clear the slot */
587 ModelAction *next = priv->next_backtrack;
588 priv->next_backtrack = NULL;
593 * Processes a read or rmw model action.
594 * @param curr is the read model action to process.
595 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
596 * @return True if processing this read updates the mo_graph.
598 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
600 uint64_t value = VALUE_NONE;
601 bool updated = false;
/* NOTE(review): this function's surrounding loop/returns appear partially
 * elided in this excerpt */
603 const ModelAction *reads_from = curr->get_node()->get_read_from();
604 if (reads_from != NULL) {
/* Stage mo_graph edits so they can be rolled back if infeasible */
605 mo_graph->startChanges();
607 value = reads_from->get_value();
608 bool r_status = false;
/* Recency/mo checks apply only to the first half of an RMW */
610 if (!second_part_of_rmw) {
611 check_recency(curr, reads_from);
612 r_status = r_modification_order(curr, reads_from);
/* Infeasible choice with remaining alternatives: roll back and retry */
616 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
617 mo_graph->rollbackChanges();
618 too_many_reads = false;
/* Commit the read and propagate promise bookkeeping */
622 curr->read_from(reads_from);
623 mo_graph->commitChanges();
624 mo_check_promises(curr->get_tid(), reads_from);
627 } else if (!second_part_of_rmw) {
628 /* Read from future value */
629 value = curr->get_node()->get_future_value();
630 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
631 curr->read_from(NULL);
/* A promise obligates some later write to supply this value */
632 Promise *valuepromise = new Promise(curr, value, expiration);
633 promises->push_back(valuepromise);
635 get_thread(curr)->set_return_value(value);
641 * Processes a lock, trylock, or unlock model action. @param curr is
642 * the read model action to process.
644 * The try lock operation checks whether the lock is taken. If not,
645 * it falls to the normal lock operation case. If so, it returns
648 * The lock operation has already been checked that it is enabled, so
649 * it just grabs the lock and synchronizes with the previous unlock.
651 * The unlock operation has to re-enable all of the threads that are
652 * waiting on the lock.
654 * @return True if synchronization was updated; false otherwise
656 bool ModelChecker::process_mutex(ModelAction *curr) {
657 std::mutex *mutex=NULL;
658 struct std::mutex_state *state=NULL;
/* Lock-family actions carry the mutex as their location; wait actions
 * carry it as their value (location is the condvar) */
660 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
661 mutex = (std::mutex *)curr->get_location();
662 state = mutex->get_state();
663 } else if(curr->is_wait()) {
664 mutex = (std::mutex *)curr->get_value();
665 state = mutex->get_state();
668 switch (curr->get_type()) {
669 case ATOMIC_TRYLOCK: {
670 bool success = !state->islocked;
671 curr->set_try_lock(success);
/* Failed trylock returns 0 to the user thread */
673 get_thread(curr)->set_return_value(0);
676 get_thread(curr)->set_return_value(1);
678 //otherwise fall into the lock case
/* Flag use of a mutex before its construction was observed */
680 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
681 assert_bug("Lock access before initialization");
682 state->islocked = true;
683 ModelAction *unlock = get_last_unlock(curr);
684 //synchronize with the previous unlock statement
685 if (unlock != NULL) {
686 curr->synchronize_with(unlock);
691 case ATOMIC_UNLOCK: {
693 state->islocked = false;
694 //wake up the other threads
695 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
696 //activate all the waiting threads
697 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
698 scheduler->wake(get_thread(*rit));
/* (WAIT case) releasing the mutex as part of the wait */
705 state->islocked = false;
706 //wake up the other threads
707 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
708 //activate all the waiting threads
709 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
710 scheduler->wake(get_thread(*rit));
713 //check whether we should go to sleep or not...simulate spurious failures
714 if (curr->get_node()->get_misc()==0) {
715 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
/* misc==0: actually sleep on the condvar; otherwise model a spurious wakeup */
717 scheduler->sleep(get_current_thread());
721 case ATOMIC_NOTIFY_ALL: {
722 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
723 //activate all the waiting threads
724 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
725 scheduler->wake(get_thread(*rit));
730 case ATOMIC_NOTIFY_ONE: {
731 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The node's misc index chooses which waiter this notify wakes, so all
 * choices are explored across executions */
732 int wakeupthread=curr->get_node()->get_misc();
733 action_list_t::iterator it = waiters->begin();
734 advance(it, wakeupthread);
735 scheduler->wake(get_thread(*it));
747 * Process a write ModelAction
748 * @param curr The ModelAction to process
749 * @return True if the mo_graph was updated or promises were resolved
751 bool ModelChecker::process_write(ModelAction *curr)
753 bool updated_mod_order = w_modification_order(curr);
754 bool updated_promises = resolve_promises(curr);
/* Only once all promises are resolved is mo complete enough to vet the
 * queued future values */
756 if (promises->size() == 0) {
757 for (unsigned int i = 0; i < futurevalues->size(); i++) {
758 struct PendingFutureValue pfv = (*futurevalues)[i];
759 //Do more ambitious checks now that mo is more complete
760 if (mo_may_allow(pfv.writer, pfv.act)&&
761 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
762 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
763 priv->next_backtrack = pfv.act;
/* All queued future values have been examined; clear the queue */
765 futurevalues->resize(0);
768 mo_graph->commitChanges();
769 mo_check_promises(curr->get_tid(), curr);
/* Writes have no meaningful return value for the user thread */
771 get_thread(curr)->set_return_value(VALUE_NONE);
772 return updated_mod_order || updated_promises;
776 * @brief Process the current action for thread-related activity
778 * Performs current-action processing for a THREAD_* ModelAction. Processes
779 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
780 * synchronization, etc. This function is a no-op for non-THREAD actions
781 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
783 * @param curr The current action
784 * @return True if synchronization was updated or a thread completed
786 bool ModelChecker::process_thread_action(ModelAction *curr)
788 bool updated = false;
790 switch (curr->get_type()) {
791 case THREAD_CREATE: {
/* The created Thread is carried in the action's location field */
792 Thread *th = (Thread *)curr->get_location();
793 th->set_creation(curr);
/* (THREAD_JOIN case) synchronize with the joined thread's last action */
797 Thread *blocking = (Thread *)curr->get_location();
798 ModelAction *act = get_last_action(blocking->get_id());
799 curr->synchronize_with(act);
800 updated = true; /* trigger rel-seq checks */
803 case THREAD_FINISH: {
804 Thread *th = get_thread(curr);
/* Wake every thread blocked in THREAD_JOIN on this thread */
805 while (!th->wait_list_empty()) {
806 ModelAction *act = th->pop_wait_list();
807 scheduler->wake(get_thread(act));
810 updated = true; /* trigger rel-seq checks */
/* (THREAD_START case, per the call below) */
814 check_promises(curr->get_tid(), NULL, curr->get_cv());
825 * @brief Process the current action for release sequence fixup activity
827 * Performs model-checker release sequence fixups for the current action,
828 * forcing a single pending release sequence to break (with a given, potential
829 * "loose" write) or to complete (i.e., synchronize). If a pending release
830 * sequence forms a complete release sequence, then we must perform the fixup
831 * synchronization, mo_graph additions, etc.
833 * @param curr The current action; must be a release sequence fixup action
834 * @param work_queue The work queue to which to add work items as they are
837 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The node's relseq_break choice decides break vs. synchronize; a NULL
 * break-write presumably means "synchronize" — confirm against Node API */
839 const ModelAction *write = curr->get_node()->get_relseq_break();
840 struct release_seq *sequence = pending_rel_seqs->back();
841 pending_rel_seqs->pop_back();
843 ModelAction *acquire = sequence->acquire;
844 const ModelAction *rf = sequence->rf;
845 const ModelAction *release = sequence->release;
849 ASSERT(release->same_thread(rf));
853 * @todo Forcing a synchronization requires that we set
854 * modification order constraints. For instance, we can't allow
855 * a fixup sequence in which two separate read-acquire
856 * operations read from the same sequence, where the first one
857 * synchronizes and the other doesn't. Essentially, we can't
858 * allow any writes to insert themselves between 'release' and
862 /* Must synchronize */
863 if (!acquire->synchronize_with(release)) {
864 set_bad_synchronization();
867 /* Re-check all pending release sequences */
868 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
869 /* Re-check act for mo_graph edges */
870 work_queue->push_back(MOEdgeWorkEntry(acquire));
872 /* propagate synchronization to later actions */
873 action_list_t::reverse_iterator rit = action_trace->rbegin();
874 for (; (*rit) != acquire; rit++) {
875 ModelAction *propagate = *rit;
876 if (acquire->happens_before(propagate)) {
877 propagate->synchronize_with(acquire);
878 /* Re-check 'propagate' for mo_graph edges */
879 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* (else branch: break the sequence instead of synchronizing) */
883 /* Break release sequence with new edges:
884 * release --mo--> write --mo--> rf */
885 mo_graph->addEdge(release, write);
886 mo_graph->addEdge(write, rf);
889 /* See if we have realized a data race */
890 if (checkDataRaces())
891 assert_bug("Datarace");
895 * Initialize the current action by performing one or more of the following
896 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
897 * in the NodeStack, manipulating backtracking sets, allocating and
898 * initializing clock vectors, and computing the promises to fulfill.
900 * @param curr The current action, as passed from the user context; may be
901 * freed/invalidated after the execution of this function, with a different
902 * action "returned" its place (pass-by-reference)
903 * @return True if curr is a newly-explored action; false otherwise
905 bool ModelChecker::initialize_curr_action(ModelAction **curr)
907 ModelAction *newcurr;
/* Second half of an RMW: merge into the existing RMWR action */
909 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
910 newcurr = process_rmw(*curr);
913 if (newcurr->is_rmw())
914 compute_promises(newcurr);
/* Otherwise: assign a fresh sequence number and step the NodeStack */
920 (*curr)->set_seq_number(get_next_seq_num());
922 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
/* NOTE(review): the branch structure around the following lines appears
 * partially elided; the comments restate only the visible intent */
924 /* First restore type and order in case of RMW operation */
925 if ((*curr)->is_rmwr())
926 newcurr->copy_typeandorder(*curr);
928 ASSERT((*curr)->get_location() == newcurr->get_location());
929 newcurr->copy_from_new(*curr);
931 /* Discard duplicate ModelAction; use action from NodeStack */
934 /* Always compute new clock vector */
935 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
938 return false; /* Action was explored previously */
942 /* Always compute new clock vector */
943 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
945 * Perform one-time actions when pushing new ModelAction onto
/* New actions get their exploration choices precomputed */
948 if (newcurr->is_write())
949 compute_promises(newcurr);
950 else if (newcurr->is_relseq_fixup())
951 compute_relseq_breakwrites(newcurr);
952 else if (newcurr->is_wait())
953 newcurr->get_node()->set_misc_max(2);
954 else if (newcurr->is_notify_one()) {
955 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
957 return true; /* This was a new ModelAction */
962 * @brief Check whether a model action is enabled.
964 * Checks whether a lock or join operation would be successful (i.e., is the
965 * lock already locked, or is the joined thread already complete). If not, put
966 * the action in a waiter list.
968 * @param curr is the ModelAction to check whether it is enabled.
969 * @return a bool that indicates whether the action is enabled.
971 bool ModelChecker::check_action_enabled(ModelAction *curr) {
972 if (curr->is_lock()) {
973 std::mutex * lock = (std::mutex *)curr->get_location();
974 struct std::mutex_state * state = lock->get_state();
/* Lock held by someone: park this action until the unlock wakes us */
975 if (state->islocked) {
976 //Stick the action in the appropriate waiting queue
977 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
980 } else if (curr->get_type() == THREAD_JOIN) {
981 Thread *blocking = (Thread *)curr->get_location();
/* Join target still running: queue on its wait list (see THREAD_FINISH) */
982 if (!blocking->is_complete()) {
983 blocking->push_wait_list(curr);
992 * Stores the ModelAction for the current thread action. Call this
993 * immediately before switching from user- to system-context to pass
995 * @param act The ModelAction created by the user-thread action
997 void ModelChecker::set_current_action(ModelAction *act) {
998 priv->current_action = act;
1002 * This is the heart of the model checker routine. It performs model-checking
1003 * actions corresponding to a given "current action." Among other processes, it
1004 * calculates reads-from relationships, updates synchronization clock vectors,
1005 * forms a memory_order constraints graph, and handles replay/backtrack
1006 * execution when running permutations of previously-observed executions.
1008 * @param curr The current action to process
1009 * @return The next Thread that must be executed. May be NULL if ModelChecker
1010 * makes no choice (e.g., according to replay execution, combining RMW actions,
1013 Thread * ModelChecker::check_current_action(ModelAction *curr)
1016 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
/* Blocked lock/join: defer this action and schedule someone else */
1018 if (!check_action_enabled(curr)) {
1019 /* Make the execution look like we chose to run this action
1020 * much later, when a lock/join can succeed */
1021 get_current_thread()->set_pending(curr);
1022 scheduler->sleep(get_current_thread());
1023 return get_next_thread(NULL);
/* May replace curr with an equivalent action from the NodeStack */
1026 bool newly_explored = initialize_curr_action(&curr);
1028 wake_up_sleeping_actions(curr);
1030 /* Add the action to lists before any other model-checking tasks */
1031 if (!second_part_of_rmw)
1032 add_action_to_lists(curr);
1034 /* Build may_read_from set for newly-created actions */
1035 if (newly_explored && curr->is_read())
1036 build_reads_from_past(curr);
1038 /* Initialize work_queue with the "current action" work */
1039 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Fixed-point loop: processing one entry may enqueue more work */
1040 while (!work_queue.empty() && !has_asserted()) {
1041 WorkQueueEntry work = work_queue.front();
1042 work_queue.pop_front();
1044 switch (work.type) {
1045 case WORK_CHECK_CURR_ACTION: {
1046 ModelAction *act = work.action;
1047 bool update = false; /* update this location's release seq's */
1048 bool update_all = false; /* update all release seq's */
1050 if (process_thread_action(curr))
1053 if (act->is_read() && process_read(act, second_part_of_rmw))
1056 if (act->is_write() && process_write(act))
1059 if (act->is_mutex_op() && process_mutex(act))
1062 if (act->is_relseq_fixup())
1063 process_relseq_fixup(curr, &work_queue);
/* Queue release-sequence re-checks: all locations or just act's */
1066 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1068 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1071 case WORK_CHECK_RELEASE_SEQ:
1072 resolve_release_sequences(work.location, &work_queue);
1074 case WORK_CHECK_MO_EDGES: {
1075 /** @todo Complete verification of work_queue */
1076 ModelAction *act = work.action;
1077 bool updated = false;
1079 if (act->is_read()) {
1080 const ModelAction *rf = act->get_reads_from();
1081 if (rf != NULL && r_modification_order(act, rf))
1084 if (act->is_write()) {
1085 if (w_modification_order(act))
1088 mo_graph->commitChanges();
1091 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* Record any new backtracking points, then pick the next thread */
1100 check_curr_backtracking(curr);
1101 set_backtracking(curr);
1102 return get_next_thread(curr);
/** If the current action's node (or its parent) still has unexplored
 *  choices, cache curr as the latest backtracking point. */
1105 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1106 Node *currnode = curr->get_node();
1107 Node *parnode = currnode->get_parent();
/* Any remaining alternative — thread backtrack, misc index, read-from,
 * future value, promise set, or relseq break — makes curr a candidate;
 * keep only the latest such point */
1109 if ((!parnode->backtrack_empty() ||
1110 !currnode->misc_empty() ||
1111 !currnode->read_from_empty() ||
1112 !currnode->future_value_empty() ||
1113 !currnode->promise_empty() ||
1114 !currnode->relseq_break_empty())
1115 && (!priv->next_backtrack ||
1116 *curr > *priv->next_backtrack)) {
1117 priv->next_backtrack = curr;
/** @return true if any outstanding promise's expiration clock has already
 *  passed the current sequence-number clock */
1121 bool ModelChecker::promises_expired() const
1123 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1124 Promise *promise = (*promises)[promise_index];
1125 if (promise->get_expiration()<priv->used_sequence_numbers) {
1132 /** @return whether the current partial trace must be a prefix of a
1133 * feasible trace. */
1134 bool ModelChecker::isfeasibleprefix() const
/* Feasible-prefix requires no outstanding promises or pending release
 * sequences in addition to plain feasibility */
1136 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1139 /** @return whether the current partial trace is feasible. */
1140 bool ModelChecker::isfeasible() const
/* Debug-only diagnostic; the actual check is in the return expression */
1142 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1143 DEBUG("Infeasible: RMW violation\n");
1145 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1148 /** @return whether the current partial trace is feasible other than
1149 * multiple RMW reading from the same store. */
1150 bool ModelChecker::isfeasibleotherthanRMW() const
// Debug-only: report every individual infeasibility cause that applies.
1152 if (DBG_ENABLED()) {
1153 if (mo_graph->checkForCycles())
1154 DEBUG("Infeasible: modification order cycles\n");
1156 DEBUG("Infeasible: failed promise\n");
1158 DEBUG("Infeasible: too many reads\n");
1159 if (bad_synchronization)
1160 DEBUG("Infeasible: bad synchronization ordering\n");
1161 if (promises_expired())
1162 DEBUG("Infeasible: promises expired\n");
// Feasible only when none of the infeasibility conditions hold.
1164 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1167 /** Returns whether the current completed trace is feasible. */
1168 bool ModelChecker::isfinalfeasible() const
// Debug-only diagnostic; fixed typo in message ("unrevolved" -> "unresolved").
1170 if (DBG_ENABLED() && promises->size() != 0)
1171 DEBUG("Infeasible: unresolved promises\n");
// A completed trace is feasible only if the partial-trace checks pass and
// every Promise was resolved.
1173 return isfeasible() && promises->size() == 0;
1176 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1177 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
// The immediately preceding action by this thread is the RMWR half.
1178 ModelAction *lastread = get_last_action(act->get_tid());
1179 lastread->process_rmw(act);
// If this completes a full RMW with a known source write, the RMW must be
// ordered immediately after the write it read from in the mod order.
1180 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1181 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1182 mo_graph->commitChanges();
1188 * Checks whether a thread has read from the same write for too many times
1189 * without seeing the effects of a later write.
1192 * 1) there must be a different write that we could read from that would satisfy the modification order,
1193 * 2) we must have read from the same value in excess of maxreads times, and
1194 * 3) that other write must have been in the reads_from set for maxreads times.
1196 * If so, we decide that the execution is no longer feasible.
1198 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
// maxreads == 0 disables the recency check entirely.
1199 if (params.maxreads != 0) {
// With at most one read-from candidate there is no "other" write to prefer.
1201 if (curr->get_node()->get_read_from_size() <= 1)
1203 //Must make sure that execution is currently feasible... We could
1204 //accidentally clear by rolling back
1207 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1208 int tid = id_to_int(curr->get_tid());
1211 if ((int)thrd_lists->size() <= tid)
1213 action_list_t *list = &(*thrd_lists)[tid];
1215 action_list_t::reverse_iterator rit = list->rbegin();
1216 /* Skip past curr */
1217 for (; (*rit) != curr; rit++)
1219 /* go past curr now */
// Remember the position right after curr so the per-candidate re-scan
// below can restart from the same point.
1222 action_list_t::reverse_iterator ritcopy = rit;
1223 //See if we have enough reads from the same value
1225 for (; count < params.maxreads; rit++,count++) {
1226 if (rit==list->rend())
1228 ModelAction *act = *rit;
1229 if (!act->is_read())
// Bail out early if any of the last maxreads reads used a different write.
1232 if (act->get_reads_from() != rf)
1234 if (act->get_node()->get_read_from_size() <= 1)
// For each alternative write this read could have taken instead:
1237 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1239 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1241 //Need a different write
1245 /* Test to see whether this is a feasible write to read from*/
// Speculatively add the mod-order edges, test feasibility, then roll back.
1246 mo_graph->startChanges();
1247 r_modification_order(curr, write);
1248 bool feasiblereadfrom = isfeasible();
1249 mo_graph->rollbackChanges();
1251 if (!feasiblereadfrom)
1255 bool feasiblewrite = true;
1256 //now we need to see if this write works for everyone
1258 for (int loop = count; loop>0; loop--,rit++) {
1259 ModelAction *act=*rit;
1260 bool foundvalue = false;
1261 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1262 if (act->get_node()->get_read_from_at(j)==write) {
1268 feasiblewrite = false;
// An alternative write was always available yet never taken: declare the
// execution infeasible due to too many reads of the same store.
1272 if (feasiblewrite) {
1273 too_many_reads = true;
1281 * Updates the mo_graph with the constraints imposed from the current
1284 * Basic idea is the following: Go through each other thread and find
1285 * the latest action that happened before our read. Two cases:
1287 * (1) The action is a write => that write must either occur before
1288 * the write we read from or be the write we read from.
1290 * (2) The action is a read => the write that that action read from
1291 * must occur before the write we read from or be the same write.
1293 * @param curr The current action. Must be a read.
1294 * @param rf The action that curr reads from. Must be a write.
1295 * @return True if modification order edges were added; false otherwise
1297 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1299 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1302 ASSERT(curr->is_read());
1304 /* Iterate over all threads */
1305 for (i = 0; i < thrd_lists->size(); i++) {
1306 /* Iterate over actions in thread, starting from most recent */
1307 action_list_t *list = &(*thrd_lists)[i];
1308 action_list_t::reverse_iterator rit;
1309 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1310 ModelAction *act = *rit;
1313 * Include at most one act per-thread that "happens
1314 * before" curr. Don't consider reflexively.
1316 if (act->happens_before(curr) && act != curr) {
// Case (1): earlier write must be mod-ordered before rf.
1317 if (act->is_write()) {
1319 mo_graph->addEdge(act, rf);
// Case (2): the write that earlier read saw must be mod-ordered before rf.
1323 const ModelAction *prevreadfrom = act->get_reads_from();
1324 //if the previous read is unresolved, keep going...
1325 if (prevreadfrom == NULL)
1328 if (rf != prevreadfrom) {
1329 mo_graph->addEdge(prevreadfrom, rf);
1341 /** This method fixes up the modification order when we resolve a
1342 * promises. The basic problem is that actions that occur after the
1343 * read curr could not properly add items to the modification order
1346 * So for each thread, we find the earliest item that happens after
1347 * the read curr. This is the item we have to fix up with additional
1348 * constraints. If that action is write, we add a MO edge between
1349 * the Action rf and that action. If the action is a read, we add a
1350 * MO edge between the Action rf, and whatever the read accessed.
1352 * @param curr is the read ModelAction that we are fixing up MO edges for.
1353 * @param rf is the write ModelAction that curr reads from.
1356 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1358 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1360 ASSERT(curr->is_read());
1362 /* Iterate over all threads */
1363 for (i = 0; i < thrd_lists->size(); i++) {
1364 /* Iterate over actions in thread, starting from most recent */
1365 action_list_t *list = &(*thrd_lists)[i];
1366 action_list_t::reverse_iterator rit;
1367 ModelAction *lastact = NULL;
1369 /* Find last action that happens after curr that is either not curr or a rmw */
// Walking in reverse, the last match found is the earliest such action.
1370 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1371 ModelAction *act = *rit;
1372 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1378 /* Include at most one act per-thread that "happens before" curr */
1379 if (lastact != NULL) {
1380 if (lastact==curr) {
1381 //Case 1: The resolved read is a RMW, and we need to make sure
1382 //that the write portion of the RMW mod order after rf
1384 mo_graph->addEdge(rf, lastact);
1385 } else if (lastact->is_read()) {
1386 //Case 2: The resolved read is a normal read and the next
1387 //operation is a read, and we need to make sure the value read
1388 //is mod ordered after rf
1390 const ModelAction *postreadfrom = lastact->get_reads_from();
1391 if (postreadfrom != NULL&&rf != postreadfrom)
1392 mo_graph->addEdge(rf, postreadfrom);
1394 //Case 3: The resolved read is a normal read and the next
1395 //operation is a write, and we need to make sure that the
1396 //write is mod ordered after rf
1398 mo_graph->addEdge(rf, lastact);
1406 * Updates the mo_graph with the constraints imposed from the current write.
1408 * Basic idea is the following: Go through each other thread and find
1409 * the latest action that happened before our write. Two cases:
1411 * (1) The action is a write => that write must occur before
1414 * (2) The action is a read => the write that that action read from
1415 * must occur before the current write.
1417 * This method also handles two other issues:
1419 * (I) Sequential Consistency: Making sure that if the current write is
1420 * seq_cst, that it occurs after the previous seq_cst write.
1422 * (II) Sending the write back to non-synchronizing reads.
1424 * @param curr The current action. Must be a write.
1425 * @return True if modification order edges were added; false otherwise
1427 bool ModelChecker::w_modification_order(ModelAction *curr)
1429 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1432 ASSERT(curr->is_write());
1434 if (curr->is_seqcst()) {
1435 /* We have to at least see the last sequentially consistent write,
1436 so we are initialized. */
1437 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1438 if (last_seq_cst != NULL) {
1439 mo_graph->addEdge(last_seq_cst, curr);
1444 /* Iterate over all threads */
1445 for (i = 0; i < thrd_lists->size(); i++) {
1446 /* Iterate over actions in thread, starting from most recent */
1447 action_list_t *list = &(*thrd_lists)[i];
1448 action_list_t::reverse_iterator rit;
1449 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1450 ModelAction *act = *rit;
1453 * 1) If RMW and it actually read from something, then we
1454 * already have all relevant edges, so just skip to next
1457 * 2) If RMW and it didn't read from anything, we should
1458 * whatever edge we can get to speed up convergence.
1460 * 3) If normal write, we need to look at earlier actions, so
1461 * continue processing list.
1463 if (curr->is_rmw()) {
1464 if (curr->get_reads_from()!=NULL)
1473 * Include at most one act per-thread that "happens
1476 if (act->happens_before(curr)) {
1478 * Note: if act is RMW, just add edge:
1480 * The following edge should be handled elsewhere:
1481 * readfrom(act) --mo--> act
1483 if (act->is_write())
1484 mo_graph->addEdge(act, curr);
1485 else if (act->is_read()) {
1486 //if previous read accessed a null, just keep going
1487 if (act->get_reads_from() == NULL)
1489 mo_graph->addEdge(act->get_reads_from(), curr);
1493 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1494 !act->same_thread(curr)) {
1495 /* We have an action that:
1496 (1) did not happen before us
1497 (2) is a read and we are a write
1498 (3) cannot synchronize with us
1499 (4) is in a different thread
1501 that read could potentially read from our write. Note that
1502 these checks are overly conservative at this point, we'll
1503 do more checks before actually removing the
// Record the (write, read) pair so the read may later be offered this
// write's value as a "future value" candidate.
1507 if (thin_air_constraint_may_allow(curr, act)) {
1509 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1510 struct PendingFutureValue pfv = {curr,act};
1511 futurevalues->push_back(pfv);
1521 /** Arbitrary reads from the future are not allowed. Section 29.3
1522 * part 9 places some constraints. This method checks one result of the
1523 * constraint. Others require compiler support. */
1524 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
// Only an RMW-to-RMW chain can create the disallowed thin-air cycle.
1525 if (!writer->is_rmw())
1528 if (!reader->is_rmw())
// Walk the reads-from chain backward from the writer; if we reach the
// reader (or something in the reader's thread ordered before it), the
// future read would form a forbidden dependency cycle.
1531 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1532 if (search == reader)
1534 if (search->get_tid() == reader->get_tid() &&
1535 search->happens_before(reader))
1543 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1544 * some constraints. This method checks one of the following constraints (others
1545 * require compiler support):
1547 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1549 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1551 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1553 /* Iterate over all threads */
1554 for (i = 0; i < thrd_lists->size(); i++) {
// Earliest write (or write read by a read) that the reader happens-before,
// found by scanning this thread's actions in reverse.
1555 const ModelAction *write_after_read = NULL;
1557 /* Iterate over actions in thread, starting from most recent */
1558 action_list_t *list = &(*thrd_lists)[i];
1559 action_list_t::reverse_iterator rit;
1560 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1561 ModelAction *act = *rit;
1563 if (!reader->happens_before(act))
1565 else if (act->is_write())
1566 write_after_read = act;
1567 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1568 write_after_read = act->get_reads_from();
// Disallow if that write ("Y") is mod-ordered before the proposed writer
// ("Z"): reader --hb-> Y --mo-> writer violates the constraint above.
1572 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1579 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1580 * The ModelAction under consideration is expected to be taking part in
1581 * release/acquire synchronization as an object of the "reads from" relation.
1582 * Note that this can only provide release sequence support for RMW chains
1583 * which do not read from the future, as those actions cannot be traced until
1584 * their "promise" is fulfilled. Similarly, we may not even establish the
1585 * presence of a release sequence with certainty, as some modification order
1586 * constraints may be decided further in the future. Thus, this function
1587 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1588 * and a boolean representing certainty.
1590 * @param rf The action that might be part of a release sequence. Must be a
1592 * @param release_heads A pass-by-reference style return parameter. After
1593 * execution of this function, release_heads will contain the heads of all the
1594 * relevant release sequences, if any exists with certainty
1595 * @param pending A pass-by-reference style return parameter which is only used
1596 * when returning false (i.e., uncertain). Returns most information regarding
1597 * an uncertain release sequence, including any write operations that might
1598 * break the sequence.
1599 * @return true, if the ModelChecker is certain that release_heads is complete;
1602 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1603 rel_heads_list_t *release_heads,
1604 struct release_seq *pending) const
1606 /* Only check for release sequences if there are no cycles */
1607 if (mo_graph->checkForCycles())
1611 ASSERT(rf->is_write());
// Phase 1: walk the RMW chain backward, collecting every release write
// along the way as a release-sequence head.
1613 if (rf->is_release())
1614 release_heads->push_back(rf);
1616 break; /* End of RMW chain */
1618 /** @todo Need to be smarter here... In the linux lock
1619 * example, this will run to the beginning of the program for
1621 /** @todo The way to be smarter here is to keep going until 1
1622 * thread has a release preceded by an acquire and you've seen
1625 /* acq_rel RMW is a sufficient stopping condition */
1626 if (rf->is_acquire() && rf->is_release())
1627 return true; /* complete */
1629 rf = rf->get_reads_from();
1632 /* read from future: need to settle this later */
1634 return false; /* incomplete */
1637 if (rf->is_release())
1638 return true; /* complete */
1640 /* else relaxed write; check modification order for contiguous subsequence
1641 * -> rf must be same thread as release */
1642 int tid = id_to_int(rf->get_tid());
1643 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1644 action_list_t *list = &(*thrd_lists)[tid];
1645 action_list_t::const_reverse_iterator rit;
1647 /* Find rf in the thread list */
1648 rit = std::find(list->rbegin(), list->rend(), rf);
1649 ASSERT(rit != list->rend());
1651 /* Find the last write/release */
1652 for (; rit != list->rend(); rit++)
1653 if ((*rit)->is_release())
1655 if (rit == list->rend()) {
1656 /* No write-release in this thread */
1657 return true; /* complete */
1659 ModelAction *release = *rit;
1661 ASSERT(rf->same_thread(release));
1663 pending->writes.clear();
// Phase 2: for every other thread, decide whether any of its writes could
// slot between 'release' and 'rf' in the modification order, which would
// break the contiguous release sequence.
1665 bool certain = true;
1666 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1668 if (id_to_int(rf->get_tid()) == (int)i)
1669 list = &(*thrd_lists)[i];
1671 /* Can we ensure no future writes from this thread may break
1672 * the release seq? */
1673 bool future_ordered = false;
1675 ModelAction *last = get_last_action(int_to_id(i));
1676 Thread *th = get_thread(int_to_id(i));
1677 if ((last && rf->happens_before(last)) ||
1680 future_ordered = true;
1682 ASSERT(!th->is_model_thread() || future_ordered);
1684 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1685 const ModelAction *act = *rit;
1686 /* Reach synchronization -> this thread is complete */
1687 if (act->happens_before(release))
1689 if (rf->happens_before(act)) {
1690 future_ordered = true;
1694 /* Only non-RMW writes can break release sequences */
1695 if (!act->is_write() || act->is_rmw())
1698 /* Check modification order */
1699 if (mo_graph->checkReachable(rf, act)) {
1700 /* rf --mo--> act */
1701 future_ordered = true;
1704 if (mo_graph->checkReachable(act, release))
1705 /* act --mo--> release */
1707 if (mo_graph->checkReachable(release, act) &&
1708 mo_graph->checkReachable(act, rf)) {
1709 /* release --mo-> act --mo--> rf */
1710 return true; /* complete */
1712 /* act may break release sequence */
1713 pending->writes.push_back(act);
1716 if (!future_ordered)
1717 certain = false; /* This thread is uncertain */
1721 release_heads->push_back(release);
1722 pending->writes.clear();
1724 pending->release = release;
1731 * A public interface for getting the release sequence head(s) with which a
1732 * given ModelAction must synchronize. This function only returns a non-empty
1733 * result when it can locate a release sequence head with certainty. Otherwise,
1734 * it may mark the internal state of the ModelChecker so that it will handle
1735 * the release sequence at a later time, causing @a act to update its
1736 * synchronization at some later point in execution.
1737 * @param act The 'acquire' action that may read from a release sequence
1738 * @param release_heads A pass-by-reference return parameter. Will be filled
1739 * with the head(s) of the release sequence(s), if they exists with certainty.
1740 * @see ModelChecker::release_seq_heads
1742 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1744 const ModelAction *rf = act->get_reads_from();
// Allocate the pending record on the snapshotting heap so it survives
// (and is rolled back with) state snapshots.
1745 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1746 sequence->acquire = act;
1748 if (!release_seq_heads(rf, release_heads, sequence)) {
1749 /* add act to 'lazy checking' list */
1750 pending_rel_seqs->push_back(sequence);
// Certain result: the pending record is not needed after all.
1752 snapshot_free(sequence);
1757 * Attempt to resolve all stashed operations that might synchronize with a
1758 * release sequence for a given location. This implements the "lazy" portion of
1759 * determining whether or not a release sequence was contiguous, since not all
1760 * modification order information is present at the time an action occurs.
1762 * @param location The location/object that should be checked for release
1763 * sequence resolutions. A NULL value means to check all locations.
1764 * @param work_queue The work queue to which to add work items as they are
1766 * @return True if any updates occurred (new synchronization, new mo_graph
1769 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1771 bool updated = false;
1772 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1773 while (it != pending_rel_seqs->end()) {
1774 struct release_seq *pending = *it;
1775 ModelAction *act = pending->acquire;
1777 /* Only resolve sequences on the given location, if provided */
1778 if (location && act->get_location() != location) {
1783 const ModelAction *rf = act->get_reads_from();
1784 rel_heads_list_t release_heads;
// Re-run the release-sequence analysis now that more mod-order info exists.
1786 complete = release_seq_heads(rf, &release_heads, pending);
1787 for (unsigned int i = 0; i < release_heads.size(); i++) {
1788 if (!act->has_synchronized_with(release_heads[i])) {
1789 if (act->synchronize_with(release_heads[i]))
// synchronize_with failure means an illegal ordering was required.
1792 set_bad_synchronization();
1797 /* Re-check all pending release sequences */
1798 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1799 /* Re-check act for mo_graph edges */
1800 work_queue->push_back(MOEdgeWorkEntry(act));
1802 /* propagate synchronization to later actions */
1803 action_list_t::reverse_iterator rit = action_trace->rbegin();
1804 for (; (*rit) != act; rit++) {
1805 ModelAction *propagate = *rit;
1806 if (act->happens_before(propagate)) {
1807 propagate->synchronize_with(act);
1808 /* Re-check 'propagate' for mo_graph edges */
1809 work_queue->push_back(MOEdgeWorkEntry(propagate));
// Completed sequences are removed from the pending list and freed.
1814 it = pending_rel_seqs->erase(it);
1815 snapshot_free(pending);
1821 // If we resolved promises or data races, see if we have realized a data race.
1822 if (checkDataRaces())
1823 assert_bug("Datarace");
1829 * Performs various bookkeeping operations for the current ModelAction. For
1830 * instance, adds action to the per-object, per-thread action vector and to the
1831 * action trace list of all thread actions.
1833 * @param act is the ModelAction to add.
1835 void ModelChecker::add_action_to_lists(ModelAction *act)
1837 int tid = id_to_int(act->get_tid());
1838 action_trace->push_back(act);
1840 get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
1842 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
// Grow the per-thread vector lazily; next_thread_id bounds all live tids.
1843 if (tid >= (int)vec->size())
1844 vec->resize(priv->next_thread_id);
1845 (*vec)[tid].push_back(act);
1847 if ((int)thrd_last_action->size() <= tid)
1848 thrd_last_action->resize(get_num_threads());
1849 (*thrd_last_action)[tid] = act;
// A wait action is additionally indexed under its mutex's location, which
// it carries in its value field.
1851 if (act->is_wait()) {
1852 void *mutex_loc=(void *) act->get_value();
1853 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1855 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1856 if (tid >= (int)vec->size())
1857 vec->resize(priv->next_thread_id);
1858 (*vec)[tid].push_back(act);
1860 if ((int)thrd_last_action->size() <= tid)
1861 thrd_last_action->resize(get_num_threads());
1862 (*thrd_last_action)[tid] = act;
1867 * @brief Get the last action performed by a particular Thread
1868 * @param tid The thread ID of the Thread in question
1869 * @return The last action in the thread
1871 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1873 int threadid = id_to_int(tid);
// Out-of-range tids have performed no actions yet.
1874 if (threadid < (int)thrd_last_action->size())
1875 return (*thrd_last_action)[id_to_int(tid)];
1881 * Gets the last memory_order_seq_cst write (in the total global sequence)
1882 * performed on a particular object (i.e., memory location), not including the
1884 * @param curr The current ModelAction; also denotes the object location to
1886 * @return The last seq_cst write
1888 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1890 void *location = curr->get_location();
1891 action_list_t *list = get_safe_ptr_action(obj_map, location);
1892 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1893 action_list_t::reverse_iterator rit;
// Reverse scan returns the most recent matching write; curr is excluded.
1894 for (rit = list->rbegin(); rit != list->rend(); rit++)
1895 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1901 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1902 * location). This function identifies the mutex according to the current
1903 * action, which is presumed to perform on the same mutex.
1904 * @param curr The current ModelAction; also denotes the object location to
1906 * @return The last unlock operation
1908 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1910 void *location = curr->get_location();
1911 action_list_t *list = get_safe_ptr_action(obj_map, location);
1912 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1913 action_list_t::reverse_iterator rit;
// Wait operations release the mutex too, so they count as unlocks here.
1914 for (rit = list->rbegin(); rit != list->rend(); rit++)
1915 if ((*rit)->is_unlock() || (*rit)->is_wait())
/** @brief Get the most recent action by @a tid, falling back to the
 *  thread-creation action if the thread has not acted yet. */
1920 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1922 ModelAction *parent = get_last_action(tid);
1924 parent = get_thread(tid)->get_creation();
1929 * Returns the clock vector for a given thread.
1930 * @param tid The thread whose clock vector we want
1931 * @return Desired clock vector
1933 ClockVector * ModelChecker::get_cv(thread_id_t tid)
// The thread's current clock state is that of its parent action.
1935 return get_parent_action(tid)->get_cv();
1939 * Resolve a set of Promises with a current write. The set is provided in the
1940 * Node corresponding to @a write.
1941 * @param write The ModelAction that is fulfilling Promises
1942 * @return True if promises were resolved; false otherwise
1944 bool ModelChecker::resolve_promises(ModelAction *write)
1946 bool resolved = false;
1947 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
// i indexes the Node's promise-selection bits; promise_index tracks the
// (shrinking) promises vector as resolved entries are erased.
1949 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1950 Promise *promise = (*promises)[promise_index];
1951 if (write->get_node()->get_promise(i)) {
1952 ModelAction *read = promise->get_action();
1953 if (read->is_rmw()) {
1954 mo_graph->addRMWEdge(write, read);
1956 read->read_from(write);
1957 //First fix up the modification order for actions that happened
1959 r_modification_order(read, write);
1960 //Next fix up the modification order for actions that happened
1962 post_r_modification_order(read, write);
1963 //Make sure the promise's value matches the write's value
1964 ASSERT(promise->get_value() == write->get_value());
1967 promises->erase(promises->begin() + promise_index);
1968 threads_to_check.push_back(read->get_tid());
1975 //Check whether reading these writes has made threads unable to
1978 for(unsigned int i=0;i<threads_to_check.size();i++)
1979 mo_check_promises(threads_to_check[i], write);
1985 * Compute the set of promises that could potentially be satisfied by this
1986 * action. Note that the set computation actually appears in the Node, not in
1988 * @param curr The ModelAction that may satisfy promises
1990 void ModelChecker::compute_promises(ModelAction *curr)
1992 for (unsigned int i = 0; i < promises->size(); i++) {
1993 Promise *promise = (*promises)[i];
1994 const ModelAction *act = promise->get_action();
// curr can satisfy the promise only if the promised read is unordered
// w.r.t. curr, cannot synchronize with it, is in another thread, and
// matches on both location and value.
1995 if (!act->happens_before(curr) &&
1997 !act->could_synchronize_with(curr) &&
1998 !act->same_thread(curr) &&
1999 act->get_location() == curr->get_location() &&
2000 promise->get_value() == curr->get_value()) {
2001 curr->get_node()->set_promise(i, act->is_rmw());
2006 /** Checks promises in response to change in ClockVector Threads. */
2007 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2009 for (unsigned int i = 0; i < promises->size(); i++) {
2010 Promise *promise = (*promises)[i];
2011 const ModelAction *act = promise->get_action();
// The merge newly synchronized this thread with the promised read...
2012 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2013 merge_cv->synchronized_since(act)) {
// ...so the thread can no longer fulfill it; increment_threads returns
// true when no eligible thread remains.
2014 if (promise->increment_threads(tid)) {
2015 //Promise has failed
2016 failed_promise = true;
/** @brief Re-check all outstanding promises after a thread is disabled;
 *  any promise that can no longer be fulfilled fails the execution. */
2023 void ModelChecker::check_promises_thread_disabled() {
2024 for (unsigned int i = 0; i < promises->size(); i++) {
2025 Promise *promise = (*promises)[i];
2026 if (promise->check_promise()) {
2027 failed_promise = true;
2033 /** Checks promises in response to addition to modification order for threads.
2035 * pthread is the thread that performed the read that created the promise
2037 * pread is the read that created the promise
2039 * pwrite is either the first write to same location as pread by
2040 * pthread that is sequenced after pread or the value read by the
2041 * first read to the same location as pread by pthread that is
2042 * sequenced after pread..
2044 * 1. If tid=pthread, then we check what other threads are reachable
2045 * through the mode order starting with pwrite. Those threads cannot
2046 * perform a write that will resolve the promise due to modification
2047 * order constraints.
2049 * 2. If the tid is not pthread, we check whether pwrite can reach the
2050 * action write through the modification order. If so, that thread
2051 * cannot perform a future write that will resolve the promise due to
2052 * modification order constraints.
2054 * @param tid The thread that either read from the model action
2055 * write, or actually did the model action write.
2057 * @param write The ModelAction representing the relevant write.
2060 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2061 void * location = write->get_location();
2062 for (unsigned int i = 0; i < promises->size(); i++) {
2063 Promise *promise = (*promises)[i];
2064 const ModelAction *act = promise->get_action();
2066 //Is this promise on the same location?
2067 if ( act->get_location() != location )
2070 //same thread as the promise
2071 if ( act->get_tid()==tid ) {
2073 //do we have a pwrite for the promise, if not, set it
2074 if (promise->get_write() == NULL ) {
2075 promise->set_write(write);
2076 //The pwrite cannot happen before the promise
2077 if (write->happens_before(act) && (write != act)) {
2078 failed_promise = true;
2082 if (mo_graph->checkPromise(write, promise)) {
2083 failed_promise = true;
2088 //Don't do any lookups twice for the same thread
2089 if (promise->has_sync_thread(tid))
// Case 2 above: pwrite --mo--> write rules this thread out as a fulfiller.
2092 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2093 if (promise->increment_threads(tid)) {
2094 failed_promise = true;
2102 * Compute the set of writes that may break the current pending release
2103 * sequence. This information is extracted from previous release sequence
2106 * @param curr The current ModelAction. Must be a release sequence fixup
2109 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
// Nothing to fix up when no release sequences are pending.
2111 if (pending_rel_seqs->empty())
2114 struct release_seq *pending = pending_rel_seqs->back();
// Offer each potentially-breaking write as a backtracking choice.
2115 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2116 const ModelAction *write = pending->writes[i];
2117 curr->get_node()->add_relseq_break(write);
2120 /* NULL means don't break the sequence; just synchronize */
2121 curr->get_node()->add_relseq_break(NULL);
2125 * Build up an initial set of all past writes that this 'read' action may read
2126 * from. This set is determined by the clock vector's "happens before"
2128 * @param curr is the current ModelAction that we are exploring; it must be a
2131 void ModelChecker::build_reads_from_past(ModelAction *curr)
2133 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2135 ASSERT(curr->is_read());
2137 ModelAction *last_seq_cst = NULL;
2139 /* Track whether this object has been initialized */
2140 bool initialized = false;
2142 if (curr->is_seqcst()) {
2143 last_seq_cst = get_last_seq_cst(curr);
2144 /* We have to at least see the last sequentially consistent write,
2145 so we are initialized. */
2146 if (last_seq_cst != NULL)
2150 /* Iterate over all threads */
2151 for (i = 0; i < thrd_lists->size(); i++) {
2152 /* Iterate over actions in thread, starting from most recent */
2153 action_list_t *list = &(*thrd_lists)[i];
2154 action_list_t::reverse_iterator rit;
2155 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2156 ModelAction *act = *rit;
2158 /* Only consider 'write' actions */
2159 if (!act->is_write() || act == curr)
2162 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2163 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
// Sleeping threads may only read from writes permitted by the sleep-set
// rule (see sleep_can_read_from); seq_cst reads are exempt.
2164 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2165 DEBUG("Adding action to may_read_from:\n");
2166 if (DBG_ENABLED()) {
2170 curr->get_node()->add_read_from(act);
2174 /* Include at most one act per-thread that "happens before" curr */
2175 if (act->happens_before(curr)) {
2183 assert_bug("May read from uninitialized atomic");
2185 if (DBG_ENABLED() || !initialized) {
2186 printf("Reached read action:\n");
2188 printf("Printing may_read_from\n");
2189 curr->get_node()->print_may_read_from();
2190 printf("End printing may_read_from\n");
/**
 * Determine whether a read performed by a sleeping thread may observe a given
 * write. The write is acceptable when it (or, for RMWs, some older write
 * reached by walking the reads-from chain) is a release operation performed
 * while this thread was on the sleep set.
 * @param curr The read action belonging to the (possibly sleeping) thread
 * @param write The candidate write being considered
 * @return presumably true iff the write can wake/satisfy the sleeping read
 */
bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
/* Enabled status is recorded on the parent of the write's Node */
Node *prevnode=write->get_node()->get_parent();
bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
if (write->is_release()&&thread_sleep)
if (!write->is_rmw()) {
/* For RMWs, follow the reads-from chain back to older writes */
if (write->get_reads_from()==NULL)
write=write->get_reads_from();
/**
 * @brief Print an execution trace to stdout, followed by a summary hash
 * @param list The ordered list of ModelActions forming the trace
 */
static void print_list(action_list_t *list)
action_list_t::iterator it;
printf("---------------------------------------------------------------------\n");
/* Fold every action's hash into one fingerprint for the whole execution,
 * mixing with xor/shift so ordering affects the result */
unsigned int hash=0;
for (it = list->begin(); it != list->end(); it++) {
hash=hash^(hash<<3)^((*it)->hash());
printf("HASH %u\n", hash);
printf("---------------------------------------------------------------------\n");
2226 #if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph plus reads-from (red) and
 * sequenced-before (blue) edges in Graphviz DOT format
 * @param filename Base name for the output; writes "<filename>.dot"
 */
void ModelChecker::dumpGraph(char *filename) {
sprintf(buffer, "%s.dot",filename);
/* NOTE(review): fopen result is not checked; a NULL stream would crash the
 * fprintf calls below — confirm callers guarantee a writable cwd */
FILE *file=fopen(buffer, "w");
fprintf(file, "digraph %s {\n",filename);
mo_graph->dumpNodes(file);
/* Last action seen per thread, used to draw sequenced-before edges */
ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
ModelAction *action=*it;
if (action->is_read()) {
fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
/* reads-from edge (red) back to the write this read observed */
if (action->get_reads_from()!=NULL)
fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
/* sequenced-before edge (blue) from the thread's previous action */
if (thread_array[action->get_tid()] != NULL) {
fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
thread_array[action->get_tid()]=action;
fprintf(file,"}\n");
model_free(thread_array);
/**
 * @brief Print execution statistics and the current trace to stdout.
 * Flags the trace as INFEASIBLE when the final feasibility check fails; with
 * SUPPORT_MOD_ORDER_DUMP, also dumps the mo-graph for this execution to files.
 */
void ModelChecker::print_summary()
printf("Number of executions: %d\n", num_executions);
printf("Number of feasible executions: %d\n", num_feasible_executions);
printf("Total nodes created: %d\n", node_stack->get_total_nodes());
#if SUPPORT_MOD_ORDER_DUMP
/* Write "execNNNN" (raw graph) and "graphNNNN.dot" (DOT) per execution */
char buffername[100];
sprintf(buffername, "exec%04u", num_executions);
mo_graph->dumpGraphToFile(buffername);
sprintf(buffername, "graph%04u", num_executions);
dumpGraph(buffername);
if (!isfinalfeasible())
printf("INFEASIBLE EXECUTION!\n");
print_list(action_trace);
2277 * Add a Thread to the system for the first time. Should only be called once
2279 * @param t The Thread to add
2281 void ModelChecker::add_thread(Thread *t)
2283 thread_map->put(id_to_int(t->get_id()), t);
2284 scheduler->add_thread(t);
2288 * Removes a thread from the scheduler.
2289 * @param the thread to remove.
2291 void ModelChecker::remove_thread(Thread *t)
2293 scheduler->remove_thread(t);
2297 * @brief Get a Thread reference by its ID
2298 * @param tid The Thread's ID
2299 * @return A Thread reference
2301 Thread * ModelChecker::get_thread(thread_id_t tid) const
2303 return thread_map->get(id_to_int(tid));
2307 * @brief Get a reference to the Thread in which a ModelAction was executed
2308 * @param act The ModelAction
2309 * @return A Thread reference
2311 Thread * ModelChecker::get_thread(ModelAction *act) const
2313 return get_thread(act->get_tid());
2317 * @brief Check if a Thread is currently enabled
2318 * @param t The Thread to check
2319 * @return True if the Thread is currently enabled
2321 bool ModelChecker::is_enabled(Thread *t) const
2323 return scheduler->is_enabled(t);
2327 * @brief Check if a Thread is currently enabled
2328 * @param tid The ID of the Thread to check
2329 * @return True if the Thread is currently enabled
2331 bool ModelChecker::is_enabled(thread_id_t tid) const
2333 return scheduler->is_enabled(tid);
2337 * Switch from a user-context to the "master thread" context (a.k.a. system
2338 * context). This switch is made with the intention of exploring a particular
2339 * model-checking action (described by a ModelAction object). Must be called
2340 * from a user-thread context.
2342 * @param act The current action that will be explored. May be NULL only if
2343 * trace is exiting via an assertion (see ModelChecker::set_assert and
2344 * ModelChecker::has_asserted).
2345 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2347 int ModelChecker::switch_to_master(ModelAction *act)
2350 Thread *old = thread_current();
2351 set_current_action(act);
2352 old->set_state(THREAD_READY);
2353 return Thread::swap(old, &system_context);
/**
 * Takes the next step in the execution, if possible.
 * @return Returns true (success) if a step was taken and false otherwise.
 */
bool ModelChecker::take_step() {
/* Thread that issued the pending action, if there is one */
Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
if (curr->get_state() == THREAD_READY) {
ASSERT(priv->current_action);
/* Consume the pending action; it may nominate the next thread to run */
priv->nextThread = check_current_action(priv->current_action);
priv->current_action = NULL;
/* Blocked/finished threads leave the scheduler's run set */
if (curr->is_blocked() || curr->is_complete())
scheduler->remove_thread(curr);
Thread *next = scheduler->next_thread(priv->nextThread);
/* Infeasible -> don't take any more steps */
else if (isfeasibleprefix() && have_bug_reports()) {
/* Optional step bound: stop once the execution has run too long */
if (params.bound != 0) {
if (priv->used_sequence_numbers > params.bound) {
DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
next ? id_to_int(next->get_id()) : -1);
/*
 * Launch end-of-execution release sequence fixups only when there are:
 *
 * (1) no more user threads to run (or when execution replay chooses
 *     the 'model_thread')
 * (2) pending release sequences
 * (3) pending assertions (i.e., data races)
 * (4) no pending promises
 */
if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
isfinalfeasible() && !unrealizedraces.empty()) {
/* NOTE(review): "seuqences" typo in this user-visible message; left
 * unchanged here since it is runtime output */
printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
pending_rel_seqs->size());
/* Synthesize a model-thread action to resolve the release sequences */
ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
std::memory_order_seq_cst, NULL, VALUE_NONE,
set_current_action(fixup);
/* next == NULL -> don't take any more steps */
next->set_state(THREAD_RUNNING);
if (next->get_pending() != NULL) {
/* restart a pending action */
set_current_action(next->get_pending());
next->set_pending(NULL);
next->set_state(THREAD_READY);
/* Return false only if swap fails with an error */
return (Thread::swap(&system_context, next) == 0);
2435 /** Runs the current execution until threre are no more steps to take. */
2436 void ModelChecker::finish_execution() {
2439 while (take_step());