9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
18 #define INITIAL_THREAD_ID 0
23 bug_message(const char *str) {
24 const char *fmt = " [BUG] %s\n";
25 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
26 sprintf(msg, fmt, str);
28 ~bug_message() { if (msg) snapshot_free(msg); }
31 void print() { model_print("%s", msg); }
37 * Structure for holding small ModelChecker members that should be snapshotted
39 struct model_snapshot_members {
40 ModelAction *current_action;
41 unsigned int next_thread_id;
42 modelclock_t used_sequence_numbers;
44 ModelAction *next_backtrack;
45 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
46 struct execution_stats stats;
49 /** @brief Constructor */
50 ModelChecker::ModelChecker(struct model_params params) :
51 /* Initialize default scheduler */
53 scheduler(new Scheduler()),
55 earliest_diverge(NULL),
56 action_trace(new action_list_t()),
57 thread_map(new HashTable<int, Thread *, int>()),
58 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
59 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
60 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
61 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
62 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
63 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
64 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
65 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
66 node_stack(new NodeStack()),
67 mo_graph(new CycleGraph()),
68 failed_promise(false),
69 too_many_reads(false),
71 bad_synchronization(false)
73 /* Allocate this "size" on the snapshotting heap */
74 priv = (struct model_snapshot_members *)snapshot_calloc(1, sizeof(*priv));
75 /* First thread created will have id INITIAL_THREAD_ID */
76 priv->next_thread_id = INITIAL_THREAD_ID;
78 /* Initialize a model-checker thread, for special ModelActions */
79 model_thread = new Thread(get_next_id());
80 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
83 /** @brief Destructor */
84 ModelChecker::~ModelChecker()
86 for (unsigned int i = 0; i < get_num_threads(); i++)
87 delete thread_map->get(i);
92 delete lock_waiters_map;
93 delete condvar_waiters_map;
96 for (unsigned int i = 0; i < promises->size(); i++)
97 delete (*promises)[i];
100 delete pending_rel_seqs;
102 delete thrd_last_action;
107 for (unsigned int i = 0; i < priv->bugs.size(); i++)
108 delete priv->bugs[i];
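/**
 * Look up the action list stored for a location, lazily creating (and
 * caching) an empty list the first time the location is seen.
 */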
113 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
114 action_list_t * tmp=hash->get(ptr);
116 tmp=new action_list_t();
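/** As above, but for the per-location vector of per-thread action lists. */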
122 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
123 std::vector<action_list_t> * tmp=hash->get(ptr);
125 tmp=new std::vector<action_list_t>();
132 * Restores user program to initial state and resets all model-checker data
135 void ModelChecker::reset_to_initial_state()
137 DEBUG("+++ Resetting to initial state +++\n");
138 node_stack->reset_execution();
139 failed_promise = false;
140 too_many_reads = false;
141 bad_synchronization = false;
144 /* Print all model-checker output before rollback */
147 snapshotObject->backTrackBeforeStep(0);
150 /** @return a thread ID for a new Thread */
151 thread_id_t ModelChecker::get_next_id()
153 return priv->next_thread_id++;
156 /** @return the number of user threads created during this execution */
157 unsigned int ModelChecker::get_num_threads() const
159 return priv->next_thread_id;
162 /** @return The currently executing Thread. */
163 Thread * ModelChecker::get_current_thread()
165 return scheduler->get_current_thread();
168 /** @return a sequence number for a new ModelAction */
169 modelclock_t ModelChecker::get_next_seq_num()
171 return ++priv->used_sequence_numbers;
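/** @return the current Node, i.e. the head of the NodeStack */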
174 Node * ModelChecker::get_curr_node() {
175 return node_stack->get_head();
179 * @brief Choose the next thread to execute.
181 * This function chooses the next thread that should execute. It can force the
182 * adjacency of the read/write portions of an RMW action, force THREAD_CREATE to be
183 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
184 * The model-checker may have no preference regarding the next thread (i.e.,
185 * when exploring a new execution ordering), in which case this will return
187 * @param curr The current ModelAction. This action might guide the choice of
189 * @return The next thread to run. If the model-checker has no preference, NULL.
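// Sketch of the decision order below (illustrative): (1) keep the current
// thread so an RMW is not split, (2) run the newly created thread after a
// THREAD_CREATE, (3) otherwise follow the replay path recorded in the
// NodeStack, switching threads only at the divergence point chosen for
// backtracking.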
191 Thread * ModelChecker::get_next_thread(ModelAction *curr)
196 /* Do not split atomic actions. */
198 return thread_current();
199 /* The THREAD_CREATE action points to the created Thread */
200 else if (curr->get_type() == THREAD_CREATE)
201 return (Thread *)curr->get_location();
204 /* Have we completed exploring the preselected path? */
208 /* Else, we are trying to replay an execution */
209 ModelAction *next = node_stack->get_next()->get_action();
211 if (next == diverge) {
212 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
213 earliest_diverge=diverge;
215 Node *nextnode = next->get_node();
216 Node *prevnode = nextnode->get_parent();
217 scheduler->update_sleep_set(prevnode);
219 /* Reached divergence point */
220 if (nextnode->increment_misc()) {
221 /* The next node will try to satisfy a different misc_index value. */
222 tid = next->get_tid();
223 node_stack->pop_restofstack(2);
224 } else if (nextnode->increment_promise()) {
225 /* The next node will try to satisfy a different set of promises. */
226 tid = next->get_tid();
227 node_stack->pop_restofstack(2);
228 } else if (nextnode->increment_read_from()) {
229 /* The next node will read from a different value. */
230 tid = next->get_tid();
231 node_stack->pop_restofstack(2);
232 } else if (nextnode->increment_future_value()) {
233 /* The next node will try to read from a different future value. */
234 tid = next->get_tid();
235 node_stack->pop_restofstack(2);
236 } else if (nextnode->increment_relseq_break()) {
237 /* The next node will try to resolve a release sequence differently */
238 tid = next->get_tid();
239 node_stack->pop_restofstack(2);
241 /* Make a different thread execute for next step */
242 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
243 tid = prevnode->get_next_backtrack();
244 /* Make sure the backtracked thread isn't sleeping. */
245 node_stack->pop_restofstack(1);
246 if (diverge==earliest_diverge) {
247 earliest_diverge=prevnode->get_action();
250 /* The correct sleep set is in the parent node. */
253 DEBUG("*** Divergence point ***\n");
257 tid = next->get_tid();
259 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
260 ASSERT(tid != THREAD_ID_T_NONE);
261 return thread_map->get(id_to_int(tid));
265 * We need to know what the next actions of all threads in the sleep
266 * set will be. This method computes them and stores each action in
267 * the corresponding Thread object's pending-action slot.
270 void ModelChecker::execute_sleep_set() {
271 for(unsigned int i=0;i<get_num_threads();i++) {
272 thread_id_t tid=int_to_id(i);
273 Thread *thr=get_thread(tid);
274 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
275 thr->get_pending() == NULL ) {
276 thr->set_state(THREAD_RUNNING);
277 scheduler->next_thread(thr);
278 Thread::swap(&system_context, thr);
279 priv->current_action->set_sleep_flag();
280 thr->set_pending(priv->current_action);
283 priv->current_action = NULL;
286 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
287 for(unsigned int i=0;i<get_num_threads();i++) {
288 thread_id_t tid=int_to_id(i);
289 Thread *thr=get_thread(tid);
290 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
291 ModelAction *pending_act=thr->get_pending();
292 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
293 //Remove this thread from sleep set
294 scheduler->remove_sleep(thr);
301 * Check if we are in a deadlock. Should only be called at the end of an
302 * execution, although it should not give false positives in the middle of an
303 * execution (there should be some ENABLED thread).
305 * @return True if program is in a deadlock; false otherwise
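// Illustrative scenario (hypothetical user program): thread A holds mutex m1
// and issues m2.lock() while thread B holds m2 and issues m1.lock(). No
// thread remains enabled, but both have pending actions, so this reports a
// deadlock.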
307 bool ModelChecker::is_deadlocked() const
309 bool blocking_threads = false;
310 for (unsigned int i = 0; i < get_num_threads(); i++) {
311 thread_id_t tid = int_to_id(i);
314 Thread *t = get_thread(tid);
315 if (!t->is_model_thread() && t->get_pending())
316 blocking_threads = true;
318 return blocking_threads;
322 * Check if this is a complete execution. That is, have all threads completed
323 * execution (rather than exiting because sleep sets have forced a redundant
326 * @return True if the execution is complete.
328 bool ModelChecker::is_complete_execution() const
330 for (unsigned int i = 0; i < get_num_threads(); i++)
331 if (is_enabled(int_to_id(i)))
337 * @brief Assert a bug in the executing program.
339 * Use this function to assert any sort of bug in the user program. If the
340 * current trace is feasible (actually, a prefix of some feasible execution),
341 * then this execution will be aborted, printing the appropriate message. If
342 * the current trace is not yet feasible, the error message will be stashed and
343 * printed if the execution ever becomes feasible.
345 * @param msg Descriptive message for the bug (do not include newline char)
346 * @return True if bug is immediately-feasible
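// Minimal usage sketch (illustrative, mirroring internal calls such as the
// "Lock access before initialization" check; 'model' is the global
// ModelChecker):
//   if (lock_used_before_init)
//           model->assert_bug("Lock access before initialization");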
348 bool ModelChecker::assert_bug(const char *msg)
350 priv->bugs.push_back(new bug_message(msg));
352 if (isfeasibleprefix()) {
360 * @brief Assert a bug in the executing program, asserted by a user thread
361 * @see ModelChecker::assert_bug
362 * @param msg Descriptive message for the bug (do not include newline char)
364 void ModelChecker::assert_user_bug(const char *msg)
366 /* If feasible bug, bail out now */
368 switch_to_master(NULL);
371 /** @return True, if any bugs have been reported for this execution */
372 bool ModelChecker::have_bug_reports() const
374 return priv->bugs.size() != 0;
377 /** @brief Print bug report listing for this execution (if any bugs exist) */
378 void ModelChecker::print_bugs() const
380 if (have_bug_reports()) {
381 model_print("Bug report: %zu bug%s detected\n",
383 priv->bugs.size() > 1 ? "s" : "");
384 for (unsigned int i = 0; i < priv->bugs.size(); i++)
385 priv->bugs[i]->print();
390 * @brief Record end-of-execution stats
392 * Must be run when exiting an execution. Records various stats.
393 * @see struct execution_stats
395 void ModelChecker::record_stats()
398 if (!isfinalfeasible())
399 stats.num_infeasible++;
400 else if (have_bug_reports())
401 stats.num_buggy_executions++;
402 else if (is_complete_execution())
403 stats.num_complete++;
405 stats.num_redundant++;
408 /** @brief Print execution stats */
409 void ModelChecker::print_stats() const
411 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
412 model_print("Number of redundant executions: %d\n", stats.num_redundant);
413 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
414 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
415 model_print("Total executions: %d\n", stats.num_total);
416 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
420 * @brief End-of-execution print
421 * @param printbugs Should any existing bugs be printed?
423 void ModelChecker::print_execution(bool printbugs) const
425 print_program_output();
427 if (DBG_ENABLED() || params.verbose) {
428 model_print("Earliest divergence point since last feasible execution:\n");
429 if (earliest_diverge)
430 earliest_diverge->print();
432 model_print("(Not set)\n");
438 /* Don't print invalid bugs */
447 * Queries the model-checker for more executions to explore and, if one
448 * exists, resets the model-checker state to execute a new execution.
450 * @return If there are more executions to explore, return true. Otherwise,
453 bool ModelChecker::next_execution()
456 /* Is this execution a feasible execution that's worth bug-checking? */
457 bool complete = isfinalfeasible() && (is_complete_execution() ||
460 /* End-of-execution bug checks */
463 assert_bug("Deadlock detected");
471 if (DBG_ENABLED() || params.verbose || have_bug_reports())
472 print_execution(complete);
474 clear_program_output();
477 earliest_diverge = NULL;
479 if ((diverge = get_next_backtrack()) == NULL)
483 model_print("Next execution will diverge at:\n");
487 reset_to_initial_state();
491 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
493 switch (act->get_type()) {
497 /* Optimization: relaxed operations don't need backtracking */
498 if (act->is_relaxed())
500 /* linear search: from most recent to oldest */
501 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
502 action_list_t::reverse_iterator rit;
503 for (rit = list->rbegin(); rit != list->rend(); rit++) {
504 ModelAction *prev = *rit;
505 if (prev->could_synchronize_with(act))
511 case ATOMIC_TRYLOCK: {
512 /* linear search: from most recent to oldest */
513 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
514 action_list_t::reverse_iterator rit;
515 for (rit = list->rbegin(); rit != list->rend(); rit++) {
516 ModelAction *prev = *rit;
517 if (act->is_conflicting_lock(prev))
522 case ATOMIC_UNLOCK: {
523 /* linear search: from most recent to oldest */
524 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
525 action_list_t::reverse_iterator rit;
526 for (rit = list->rbegin(); rit != list->rend(); rit++) {
527 ModelAction *prev = *rit;
528 if (!act->same_thread(prev)&&prev->is_failed_trylock())
534 /* linear search: from most recent to oldest */
535 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
536 action_list_t::reverse_iterator rit;
537 for (rit = list->rbegin(); rit != list->rend(); rit++) {
538 ModelAction *prev = *rit;
539 if (!act->same_thread(prev)&&prev->is_failed_trylock())
541 if (!act->same_thread(prev)&&prev->is_notify())
547 case ATOMIC_NOTIFY_ALL:
548 case ATOMIC_NOTIFY_ONE: {
549 /* linear search: from most recent to oldest */
550 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
551 action_list_t::reverse_iterator rit;
552 for (rit = list->rbegin(); rit != list->rend(); rit++) {
553 ModelAction *prev = *rit;
554 if (!act->same_thread(prev)&&prev->is_wait())
565 /** This method finds the backtracking points at which we should try to
566 * reorder the given ModelAction against earlier conflicting actions.
568 * @param act The ModelAction to find backtracking points for.
570 void ModelChecker::set_backtracking(ModelAction *act)
572 Thread *t = get_thread(act);
573 ModelAction * prev = get_last_conflict(act);
577 Node * node = prev->get_node()->get_parent();
579 int low_tid, high_tid;
580 if (node->is_enabled(t)) {
581 low_tid = id_to_int(act->get_tid());
582 high_tid = low_tid+1;
585 high_tid = get_num_threads();
588 for(int i = low_tid; i < high_tid; i++) {
589 thread_id_t tid = int_to_id(i);
591 /* Make sure this thread can be enabled here. */
592 if (i >= node->get_num_threads())
595 /* Don't backtrack into a point where the thread is disabled or sleeping. */
596 if (node->enabled_status(tid)!=THREAD_ENABLED)
599 /* Check if this has been explored already */
600 if (node->has_been_explored(tid))
603 /* See if fairness allows */
604 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
606 for(int t=0;t<node->get_num_threads();t++) {
607 thread_id_t tother=int_to_id(t);
608 if (node->is_enabled(tother) && node->has_priority(tother)) {
616 /* Cache the latest backtracking point */
617 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
618 priv->next_backtrack = prev;
620 /* If this is a new backtracking point, mark the tree */
621 if (!node->set_backtrack(tid))
623 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
624 id_to_int(prev->get_tid()),
625 id_to_int(t->get_id()));
634 * Returns the last backtracking point. The model checker will explore a different
635 * path for this point in the next execution.
636 * @return The ModelAction at which the next execution should diverge.
638 ModelAction * ModelChecker::get_next_backtrack()
640 ModelAction *next = priv->next_backtrack;
641 priv->next_backtrack = NULL;
646 * Processes a read or rmw model action.
647 * @param curr is the read model action to process.
648 * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
649 * @return True if processing this read updates the mo_graph.
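// Sketch of the two paths below (illustrative): (a) the Node supplies a
// concrete reads-from write, so recency and r_modification_order checks run
// and the value is returned to the thread immediately; (b) no such write
// exists yet, so the read takes a "future value" and a Promise is recorded
// that some later write must fulfill.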
651 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
653 uint64_t value = VALUE_NONE;
654 bool updated = false;
656 const ModelAction *reads_from = curr->get_node()->get_read_from();
657 if (reads_from != NULL) {
658 mo_graph->startChanges();
660 value = reads_from->get_value();
661 bool r_status = false;
663 if (!second_part_of_rmw) {
664 check_recency(curr, reads_from);
665 r_status = r_modification_order(curr, reads_from);
669 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
670 mo_graph->rollbackChanges();
671 too_many_reads = false;
675 curr->read_from(reads_from);
676 mo_graph->commitChanges();
677 mo_check_promises(curr->get_tid(), reads_from);
680 } else if (!second_part_of_rmw) {
681 /* Read from future value */
682 value = curr->get_node()->get_future_value();
683 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
684 curr->read_from(NULL);
685 Promise *valuepromise = new Promise(curr, value, expiration);
686 promises->push_back(valuepromise);
688 get_thread(curr)->set_return_value(value);
694 * Processes a lock, trylock, or unlock model action. @param curr is
695 * the mutex model action to process.
697 * The trylock operation checks whether the lock is taken. If not,
698 * it falls through to the normal lock operation case. If so, it returns
701 * The lock operation has already been checked that it is enabled, so
702 * it just grabs the lock and synchronizes with the previous unlock.
704 * The unlock operation has to re-enable all of the threads that are
705 * waiting on the lock.
707 * @return True if synchronization was updated; false otherwise
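// Sketch of the wait handling below (illustrative): an ATOMIC_WAIT releases
// the lock and wakes the lock's waiters, then either enqueues itself on the
// condition variable's waiter list and sleeps, or (when the Node's misc
// counter says so) returns immediately to model a spurious wakeup.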
709 bool ModelChecker::process_mutex(ModelAction *curr) {
710 std::mutex *mutex=NULL;
711 struct std::mutex_state *state=NULL;
713 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
714 mutex = (std::mutex *)curr->get_location();
715 state = mutex->get_state();
716 } else if(curr->is_wait()) {
717 mutex = (std::mutex *)curr->get_value();
718 state = mutex->get_state();
721 switch (curr->get_type()) {
722 case ATOMIC_TRYLOCK: {
723 bool success = !state->islocked;
724 curr->set_try_lock(success);
726 get_thread(curr)->set_return_value(0);
729 get_thread(curr)->set_return_value(1);
731 //otherwise fall into the lock case
733 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
734 assert_bug("Lock access before initialization");
735 state->islocked = true;
736 ModelAction *unlock = get_last_unlock(curr);
737 //synchronize with the previous unlock statement
738 if (unlock != NULL) {
739 curr->synchronize_with(unlock);
744 case ATOMIC_UNLOCK: {
746 state->islocked = false;
747 //wake up the other threads
748 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
749 //activate all the waiting threads
750 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
751 scheduler->wake(get_thread(*rit));
758 state->islocked = false;
759 //wake up the other threads
760 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
761 //activate all the waiting threads
762 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
763 scheduler->wake(get_thread(*rit));
766 //check whether we should go to sleep or not...simulate spurious failures
767 if (curr->get_node()->get_misc()==0) {
768 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
770 scheduler->sleep(get_current_thread());
774 case ATOMIC_NOTIFY_ALL: {
775 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
776 //activate all the waiting threads
777 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
778 scheduler->wake(get_thread(*rit));
783 case ATOMIC_NOTIFY_ONE: {
784 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
785 int wakeupthread=curr->get_node()->get_misc();
786 action_list_t::iterator it = waiters->begin();
787 advance(it, wakeupthread);
788 scheduler->wake(get_thread(*it));
800 * Process a write ModelAction
801 * @param curr The ModelAction to process
802 * @return True if the mo_graph was updated or promises were resolved
804 bool ModelChecker::process_write(ModelAction *curr)
806 bool updated_mod_order = w_modification_order(curr);
807 bool updated_promises = resolve_promises(curr);
809 if (promises->size() == 0) {
810 for (unsigned int i = 0; i < futurevalues->size(); i++) {
811 struct PendingFutureValue pfv = (*futurevalues)[i];
812 //Do more ambitious checks now that mo is more complete
813 if (mo_may_allow(pfv.writer, pfv.act)&&
814 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
815 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
816 priv->next_backtrack = pfv.act;
818 futurevalues->resize(0);
821 mo_graph->commitChanges();
822 mo_check_promises(curr->get_tid(), curr);
824 get_thread(curr)->set_return_value(VALUE_NONE);
825 return updated_mod_order || updated_promises;
829 * @brief Process the current action for thread-related activity
831 * Performs current-action processing for a THREAD_* ModelAction. Processing
832 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
833 * synchronization, etc. This function is a no-op for non-THREAD actions
834 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
836 * @param curr The current action
837 * @return True if synchronization was updated or a thread completed
839 bool ModelChecker::process_thread_action(ModelAction *curr)
841 bool updated = false;
843 switch (curr->get_type()) {
844 case THREAD_CREATE: {
845 Thread *th = (Thread *)curr->get_location();
846 th->set_creation(curr);
850 Thread *blocking = (Thread *)curr->get_location();
851 ModelAction *act = get_last_action(blocking->get_id());
852 curr->synchronize_with(act);
853 updated = true; /* trigger rel-seq checks */
856 case THREAD_FINISH: {
857 Thread *th = get_thread(curr);
858 while (!th->wait_list_empty()) {
859 ModelAction *act = th->pop_wait_list();
860 scheduler->wake(get_thread(act));
863 updated = true; /* trigger rel-seq checks */
867 check_promises(curr->get_tid(), NULL, curr->get_cv());
878 * @brief Process the current action for release sequence fixup activity
880 * Performs model-checker release sequence fixups for the current action,
881 * forcing a single pending release sequence to break (with a given, potential
882 * "loose" write) or to complete (i.e., synchronize). If a pending release
883 * sequence forms a complete release sequence, then we must perform the fixup
884 * synchronization, mo_graph additions, etc.
886 * @param curr The current action; must be a release sequence fixup action
887 * @param work_queue The work queue to which to add work items as they are
890 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
892 const ModelAction *write = curr->get_node()->get_relseq_break();
893 struct release_seq *sequence = pending_rel_seqs->back();
894 pending_rel_seqs->pop_back();
896 ModelAction *acquire = sequence->acquire;
897 const ModelAction *rf = sequence->rf;
898 const ModelAction *release = sequence->release;
902 ASSERT(release->same_thread(rf));
906 * @todo Forcing a synchronization requires that we set
907 * modification order constraints. For instance, we can't allow
908 * a fixup sequence in which two separate read-acquire
909 * operations read from the same sequence, where the first one
910 * synchronizes and the other doesn't. Essentially, we can't
911 * allow any writes to insert themselves between 'release' and
915 /* Must synchronize */
916 if (!acquire->synchronize_with(release)) {
917 set_bad_synchronization();
920 /* Re-check all pending release sequences */
921 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
922 /* Re-check act for mo_graph edges */
923 work_queue->push_back(MOEdgeWorkEntry(acquire));
925 /* propagate synchronization to later actions */
926 action_list_t::reverse_iterator rit = action_trace->rbegin();
927 for (; (*rit) != acquire; rit++) {
928 ModelAction *propagate = *rit;
929 if (acquire->happens_before(propagate)) {
930 propagate->synchronize_with(acquire);
931 /* Re-check 'propagate' for mo_graph edges */
932 work_queue->push_back(MOEdgeWorkEntry(propagate));
936 /* Break release sequence with new edges:
937 * release --mo--> write --mo--> rf */
938 mo_graph->addEdge(release, write);
939 mo_graph->addEdge(write, rf);
942 /* See if we have realized a data race */
947 * Initialize the current action by performing one or more of the following
948 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
949 * in the NodeStack, manipulating backtracking sets, allocating and
950 * initializing clock vectors, and computing the promises to fulfill.
952 * @param curr The current action, as passed from the user context; may be
953 * freed/invalidated after the execution of this function, with a different
954 * action "returned" in its place (pass-by-reference)
955 * @return True if curr is a newly-explored action; false otherwise
957 bool ModelChecker::initialize_curr_action(ModelAction **curr)
959 ModelAction *newcurr;
961 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
962 newcurr = process_rmw(*curr);
965 if (newcurr->is_rmw())
966 compute_promises(newcurr);
972 (*curr)->set_seq_number(get_next_seq_num());
974 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
976 /* First restore type and order in case of RMW operation */
977 if ((*curr)->is_rmwr())
978 newcurr->copy_typeandorder(*curr);
980 ASSERT((*curr)->get_location() == newcurr->get_location());
981 newcurr->copy_from_new(*curr);
983 /* Discard duplicate ModelAction; use action from NodeStack */
986 /* Always compute new clock vector */
987 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
990 return false; /* Action was explored previously */
994 /* Always compute new clock vector */
995 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
997 * Perform one-time actions when pushing new ModelAction onto
1000 if (newcurr->is_write())
1001 compute_promises(newcurr);
1002 else if (newcurr->is_relseq_fixup())
1003 compute_relseq_breakwrites(newcurr);
1004 else if (newcurr->is_wait())
1005 newcurr->get_node()->set_misc_max(2);
1006 else if (newcurr->is_notify_one()) {
1007 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1009 return true; /* This was a new ModelAction */
1014 * @brief Check whether a model action is enabled.
1016 * Checks whether a lock or join operation would succeed (i.e., the lock is
1017 * not already held, or the joined thread has already completed). If the
1018 * operation cannot yet succeed, put the action on a waiter list.
1020 * @param curr is the ModelAction to check whether it is enabled.
1021 * @return a bool that indicates whether the action is enabled.
1023 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1024 if (curr->is_lock()) {
1025 std::mutex * lock = (std::mutex *)curr->get_location();
1026 struct std::mutex_state * state = lock->get_state();
1027 if (state->islocked) {
1028 //Stick the action in the appropriate waiting queue
1029 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1032 } else if (curr->get_type() == THREAD_JOIN) {
1033 Thread *blocking = (Thread *)curr->get_location();
1034 if (!blocking->is_complete()) {
1035 blocking->push_wait_list(curr);
1044 * Stores the ModelAction for the current thread action. Call this
1045 * immediately before switching from user- to system-context to pass
1046 * data between them.
1047 * @param act The ModelAction created by the user-thread action
1049 void ModelChecker::set_current_action(ModelAction *act) {
1050 priv->current_action = act;
1054 * This is the heart of the model checker routine. It performs model-checking
1055 * actions corresponding to a given "current action." Among other processes, it
1056 * calculates reads-from relationships, updates synchronization clock vectors,
1057 * forms a memory_order constraints graph, and handles replay/backtrack
1058 * execution when running permutations of previously-observed executions.
1060 * @param curr The current action to process
1061 * @return The next Thread that must be executed. May be NULL if ModelChecker
1062 * makes no choice (e.g., according to replay execution, combining RMW actions,
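// Sketch of the work-queue loop below (illustrative): the initial
// CheckCurrWorkEntry dispatches to process_thread_action/process_read/
// process_write/process_mutex/process_relseq_fixup; any synchronization
// update enqueues CheckRelSeqWorkEntry items, and release-sequence
// resolution can in turn enqueue MOEdgeWorkEntry items, until the queue
// drains or an assertion fires.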
1065 Thread * ModelChecker::check_current_action(ModelAction *curr)
1068 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1070 if (!check_action_enabled(curr)) {
1071 /* Make the execution look like we chose to run this action
1072 * much later, when a lock/join can succeed */
1073 get_current_thread()->set_pending(curr);
1074 scheduler->sleep(get_current_thread());
1075 return get_next_thread(NULL);
1078 bool newly_explored = initialize_curr_action(&curr);
1080 wake_up_sleeping_actions(curr);
1082 /* Add the action to lists before any other model-checking tasks */
1083 if (!second_part_of_rmw)
1084 add_action_to_lists(curr);
1086 /* Build may_read_from set for newly-created actions */
1087 if (newly_explored && curr->is_read())
1088 build_reads_from_past(curr);
1090 /* Initialize work_queue with the "current action" work */
1091 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1092 while (!work_queue.empty() && !has_asserted()) {
1093 WorkQueueEntry work = work_queue.front();
1094 work_queue.pop_front();
1096 switch (work.type) {
1097 case WORK_CHECK_CURR_ACTION: {
1098 ModelAction *act = work.action;
1099 bool update = false; /* update this location's release seq's */
1100 bool update_all = false; /* update all release seq's */
1102 if (process_thread_action(curr))
1105 if (act->is_read() && process_read(act, second_part_of_rmw))
1108 if (act->is_write() && process_write(act))
1111 if (act->is_mutex_op() && process_mutex(act))
1114 if (act->is_relseq_fixup())
1115 process_relseq_fixup(curr, &work_queue);
1118 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1120 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1123 case WORK_CHECK_RELEASE_SEQ:
1124 resolve_release_sequences(work.location, &work_queue);
1126 case WORK_CHECK_MO_EDGES: {
1127 /** @todo Complete verification of work_queue */
1128 ModelAction *act = work.action;
1129 bool updated = false;
1131 if (act->is_read()) {
1132 const ModelAction *rf = act->get_reads_from();
1133 if (rf != NULL && r_modification_order(act, rf))
1136 if (act->is_write()) {
1137 if (w_modification_order(act))
1140 mo_graph->commitChanges();
1143 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1152 check_curr_backtracking(curr);
1153 set_backtracking(curr);
1154 return get_next_thread(curr);
1157 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1158 Node *currnode = curr->get_node();
1159 Node *parnode = currnode->get_parent();
1161 if ((!parnode->backtrack_empty() ||
1162 !currnode->misc_empty() ||
1163 !currnode->read_from_empty() ||
1164 !currnode->future_value_empty() ||
1165 !currnode->promise_empty() ||
1166 !currnode->relseq_break_empty())
1167 && (!priv->next_backtrack ||
1168 *curr > *priv->next_backtrack)) {
1169 priv->next_backtrack = curr;
1173 bool ModelChecker::promises_expired() const
1175 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1176 Promise *promise = (*promises)[promise_index];
1177 if (promise->get_expiration()<priv->used_sequence_numbers) {
1184 /** @return whether the current partial trace must be a prefix of a
1185 * feasible trace. */
1186 bool ModelChecker::isfeasibleprefix() const
1188 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1191 /** @return whether the current partial trace is feasible. */
1192 bool ModelChecker::isfeasible() const
1194 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1195 DEBUG("Infeasible: RMW violation\n");
1197 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1200 /** @return whether the current partial trace is feasible other than
1201 * multiple RMW reading from the same store. */
1202 bool ModelChecker::isfeasibleotherthanRMW() const
1204 if (DBG_ENABLED()) {
1205 if (mo_graph->checkForCycles())
1206 DEBUG("Infeasible: modification order cycles\n");
1208 DEBUG("Infeasible: failed promise\n");
1210 DEBUG("Infeasible: too many reads\n");
1211 if (bad_synchronization)
1212 DEBUG("Infeasible: bad synchronization ordering\n");
1213 if (promises_expired())
1214 DEBUG("Infeasible: promises expired\n");
1216 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1219 /** Returns whether the current completed trace is feasible. */
1220 bool ModelChecker::isfinalfeasible() const
1222 if (DBG_ENABLED() && promises->size() != 0)
1223 DEBUG("Infeasible: unresolved promises\n");
1225 return isfeasible() && promises->size() == 0;
1228 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1229 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1230 ModelAction *lastread = get_last_action(act->get_tid());
1231 lastread->process_rmw(act);
1232 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1233 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1234 mo_graph->commitChanges();
1240 * Checks whether a thread has read from the same write too many times
1241 * without seeing the effects of a later write.
1244 * 1) there must be a different write that we could read from that would satisfy the modification order,
1245 * 2) we must have read from the same value in excess of maxreads times, and
1246 * 3) that other write must have been in the reads_from set for each of those maxreads reads.
1248 * If so, we decide that the execution is no longer feasible.
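// Worked example (illustrative, assuming params.maxreads == 4): a thread's
// last four reads of x all read from write W1, while another write W2
// (a) appears in each of those reads' Node read-from sets and (b) would keep
// the execution feasible under r_modification_order. The loop below then
// sets too_many_reads and the trace is pruned as infeasible.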
1250 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
1251 if (params.maxreads != 0) {
1253 if (curr->get_node()->get_read_from_size() <= 1)
1255 //Must make sure that execution is currently feasible... We could
1256 //accidentally clear by rolling back
1259 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1260 int tid = id_to_int(curr->get_tid());
1263 if ((int)thrd_lists->size() <= tid)
1265 action_list_t *list = &(*thrd_lists)[tid];
1267 action_list_t::reverse_iterator rit = list->rbegin();
1268 /* Skip past curr */
1269 for (; (*rit) != curr; rit++)
1271 /* go past curr now */
1274 action_list_t::reverse_iterator ritcopy = rit;
1275 //See if we have enough reads from the same value
1277 for (; count < params.maxreads; rit++,count++) {
1278 if (rit==list->rend())
1280 ModelAction *act = *rit;
1281 if (!act->is_read())
1284 if (act->get_reads_from() != rf)
1286 if (act->get_node()->get_read_from_size() <= 1)
1289 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1291 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1293 //Need a different write
1297 /* Test to see whether this is a feasible write to read from*/
1298 mo_graph->startChanges();
1299 r_modification_order(curr, write);
1300 bool feasiblereadfrom = isfeasible();
1301 mo_graph->rollbackChanges();
1303 if (!feasiblereadfrom)
1307 bool feasiblewrite = true;
1308 //now we need to see if this write works for everyone
1310 for (int loop = count; loop>0; loop--,rit++) {
1311 ModelAction *act=*rit;
1312 bool foundvalue = false;
1313 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1314 if (act->get_node()->get_read_from_at(j)==write) {
1320 feasiblewrite = false;
1324 if (feasiblewrite) {
1325 too_many_reads = true;
1333 * Updates the mo_graph with the constraints imposed from the current
1336 * Basic idea is the following: Go through each other thread and find
1337 * the latest action that happened before our read. Two cases:
1339 * (1) The action is a write => that write must either occur before
1340 * the write we read from or be the write we read from.
1342 * (2) The action is a read => the write that that action read from
1343 * must occur before the write we read from or be the same write.
1345 * @param curr The current action. Must be a read.
1346 * @param rf The action that curr reads from. Must be a write.
1347 * @return True if modification order edges were added; false otherwise
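// Worked example (illustrative): curr reads x and rf == W2. If the latest
// action in some other thread that happens-before curr is a write W1 to x,
// add the edge W1 --mo--> W2; if it is instead a read that read from W0, add
// W0 --mo--> W2 (no edge is needed when that earlier write is W2 itself).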
1349 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1351 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1354 ASSERT(curr->is_read());
1356 /* Iterate over all threads */
1357 for (i = 0; i < thrd_lists->size(); i++) {
1358 /* Iterate over actions in thread, starting from most recent */
1359 action_list_t *list = &(*thrd_lists)[i];
1360 action_list_t::reverse_iterator rit;
1361 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1362 ModelAction *act = *rit;
1365 * Include at most one act per-thread that "happens
1366 * before" curr. Don't consider reflexively.
1368 if (act->happens_before(curr) && act != curr) {
1369 if (act->is_write()) {
1371 mo_graph->addEdge(act, rf);
1375 const ModelAction *prevreadfrom = act->get_reads_from();
1376 //if the previous read is unresolved, keep going...
1377 if (prevreadfrom == NULL)
1380 if (rf != prevreadfrom) {
1381 mo_graph->addEdge(prevreadfrom, rf);
1393 /** This method fixes up the modification order when we resolve a
1394 * promise. The basic problem is that actions that occur after the
1395 * read curr could not properly add items to the modification order
1398 * So for each thread, we find the earliest item that happens after
1399 * the read curr. This is the item we have to fix up with additional
1400 * constraints. If that action is a write, we add an MO edge between
1401 * the Action rf and that action. If the action is a read, we add an
1402 * MO edge between the Action rf and whatever the read accessed.
1404 * @param curr is the read ModelAction that we are fixing up MO edges for.
1405 * @param rf is the write ModelAction that curr reads from.
1408 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1410 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1412 ASSERT(curr->is_read());
1414 /* Iterate over all threads */
1415 for (i = 0; i < thrd_lists->size(); i++) {
1416 /* Iterate over actions in thread, starting from most recent */
1417 action_list_t *list = &(*thrd_lists)[i];
1418 action_list_t::reverse_iterator rit;
1419 ModelAction *lastact = NULL;
1421 /* Find last action that happens after curr that is either not curr or a rmw */
1422 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1423 ModelAction *act = *rit;
1424 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1430 /* Include at most one act per-thread that "happens before" curr */
1431 if (lastact != NULL) {
1432 if (lastact==curr) {
1433 //Case 1: The resolved read is a RMW, and we need to make sure
1434 //that the write portion of the RMW mod order after rf
1436 mo_graph->addEdge(rf, lastact);
1437 } else if (lastact->is_read()) {
1438 //Case 2: The resolved read is a normal read and the next
1439 //operation is a read, and we need to make sure the value read
1440 //is mod ordered after rf
1442 const ModelAction *postreadfrom = lastact->get_reads_from();
1443 if (postreadfrom != NULL&&rf != postreadfrom)
1444 mo_graph->addEdge(rf, postreadfrom);
1446 //Case 3: The resolved read is a normal read and the next
1447 //operation is a write, and we need to make sure that the
1448 //write is mod ordered after rf
1450 mo_graph->addEdge(rf, lastact);
1458 * Updates the mo_graph with the constraints imposed from the current write.
1460 * Basic idea is the following: Go through each other thread and find
1461 * the latest action that happened before our write. Two cases:
1463 * (1) The action is a write => that write must occur before
1466 * (2) The action is a read => the write that that action read from
1467 * must occur before the current write.
1469 * This method also handles two other issues:
1471 * (I) Sequential Consistency: Making sure that if the current write is
1472 * seq_cst, that it occurs after the previous seq_cst write.
1474 * (II) Sending the write back to non-synchronizing reads.
1476 * @param curr The current action. Must be a write.
1477 * @return True if modification order edges were added; false otherwise
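// Worked example (illustrative): curr is a write W2 to x. If the latest
// action in some other thread that happens-before W2 is a write W1 to x, add
// W1 --mo--> W2; if it is a read that read from W0, add W0 --mo--> W2.
// Reads that do not happen before W2, cannot synchronize with it, and are in
// other threads are remembered as PendingFutureValue candidates that might
// later read W2's value.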
1479 bool ModelChecker::w_modification_order(ModelAction *curr)
1481 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1484 ASSERT(curr->is_write());
1486 if (curr->is_seqcst()) {
1487 /* We have to at least see the last sequentially consistent write,
1488 so we are initialized. */
1489 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1490 if (last_seq_cst != NULL) {
1491 mo_graph->addEdge(last_seq_cst, curr);
1496 /* Iterate over all threads */
1497 for (i = 0; i < thrd_lists->size(); i++) {
1498 /* Iterate over actions in thread, starting from most recent */
1499 action_list_t *list = &(*thrd_lists)[i];
1500 action_list_t::reverse_iterator rit;
1501 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1502 ModelAction *act = *rit;
1505 * 1) If RMW and it actually read from something, then we
1506 * already have all relevant edges, so just skip to next
1509 * 2) If RMW and it didn't read from anything, we should add
1510 * whatever edge we can get to speed up convergence.
1512 * 3) If normal write, we need to look at earlier actions, so
1513 * continue processing list.
1515 if (curr->is_rmw()) {
1516 if (curr->get_reads_from()!=NULL)
1525 * Include at most one act per-thread that "happens
1528 if (act->happens_before(curr)) {
1530 * Note: if act is RMW, just add edge:
1532 * The following edge should be handled elsewhere:
1533 * readfrom(act) --mo--> act
1535 if (act->is_write())
1536 mo_graph->addEdge(act, curr);
1537 else if (act->is_read()) {
1539 //if the previous read's reads-from is still unresolved (NULL), just keep going
1539 if (act->get_reads_from() == NULL)
1541 mo_graph->addEdge(act->get_reads_from(), curr);
1545 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1546 !act->same_thread(curr)) {
1547 /* We have an action that:
1548 (1) did not happen before us
1549 (2) is a read and we are a write
1550 (3) cannot synchronize with us
1551 (4) is in a different thread
1553 that read could potentially read from our write. Note that
1554 these checks are overly conservative at this point; we'll
1555 do more checks before actually removing the
1559 if (thin_air_constraint_may_allow(curr, act)) {
1561 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1562 struct PendingFutureValue pfv = {curr,act};
1563 futurevalues->push_back(pfv);
1573 /** Arbitrary reads from the future are not allowed. Section 29.3
1574 * part 9 places some constraints. This method checks one consequence of that
1575 * constraint. Others require compiler support. */
1576 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1577 if (!writer->is_rmw())
1580 if (!reader->is_rmw())
1583 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1584 if (search == reader)
1586 if (search->get_tid() == reader->get_tid() &&
1587 search->happens_before(reader))
1595 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1596 * some constraints. This method checks the following constraint (others
1597 * require compiler support):
1599 * If X --hb-> Y --mo-> Z, then X should not read from Z.
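// Illustration (hypothetical): X == reader, Z == writer. If there is a write
// Y with X --hb--> Y and Y --mo--> Z already in the graph, then letting X
// read from Z would violate the constraint above, so this returns false and
// the future value is discarded.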
1601 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1603 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1605 /* Iterate over all threads */
1606 for (i = 0; i < thrd_lists->size(); i++) {
1607 const ModelAction *write_after_read = NULL;
1609 /* Iterate over actions in thread, starting from most recent */
1610 action_list_t *list = &(*thrd_lists)[i];
1611 action_list_t::reverse_iterator rit;
1612 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1613 ModelAction *act = *rit;
1615 if (!reader->happens_before(act))
1617 else if (act->is_write())
1618 write_after_read = act;
1619 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1620 write_after_read = act->get_reads_from();
1624 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1631 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1632 * The ModelAction under consideration is expected to be taking part in
1633 * release/acquire synchronization as an object of the "reads from" relation.
1634 * Note that this can only provide release sequence support for RMW chains
1635 * which do not read from the future, as those actions cannot be traced until
1636 * their "promise" is fulfilled. Similarly, we may not even establish the
1637 * presence of a release sequence with certainty, as some modification order
1638 * constraints may be decided further in the future. Thus, this function
1639 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1640 * and a boolean representing certainty.
1642 * @param rf The action that might be part of a release sequence. Must be a
1644 * @param release_heads A pass-by-reference style return parameter. After
1645 * execution of this function, release_heads will contain the heads of all the
1646 * relevant release sequences, if any exist with certainty
1647 * @param pending A pass-by-reference style return parameter which is only used
1648 * when returning false (i.e., uncertain). Returns most information regarding
1649 * an uncertain release sequence, including any write operations that might
1650 * break the sequence.
1651 * @return true, if the ModelChecker is certain that release_heads is complete;
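// Illustration (hypothetical chain): rf is a relaxed write in the same thread
// as an earlier release write Wrel. Wrel heads the release sequence only if
// no write from another thread can still be placed between Wrel and rf in
// modification order; any write that might still break the chain is stashed
// in pending->writes and re-examined later.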
1654 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1655 rel_heads_list_t *release_heads,
1656 struct release_seq *pending) const
1658 /* Only check for release sequences if there are no cycles */
1659 if (mo_graph->checkForCycles())
1663 ASSERT(rf->is_write());
1665 if (rf->is_release())
1666 release_heads->push_back(rf);
1668 break; /* End of RMW chain */
1670 /** @todo Need to be smarter here... In the linux lock
1671 * example, this will run to the beginning of the program for
1673 /** @todo The way to be smarter here is to keep going until 1
1674 * thread has a release preceded by an acquire and you've seen
1677 /* acq_rel RMW is a sufficient stopping condition */
1678 if (rf->is_acquire() && rf->is_release())
1679 return true; /* complete */
1681 rf = rf->get_reads_from();
1684 /* read from future: need to settle this later */
1686 return false; /* incomplete */
1689 if (rf->is_release())
1690 return true; /* complete */
1692 /* else relaxed write; check modification order for contiguous subsequence
1693 * -> rf must be same thread as release */
1694 int tid = id_to_int(rf->get_tid());
1695 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1696 action_list_t *list = &(*thrd_lists)[tid];
1697 action_list_t::const_reverse_iterator rit;
1699 /* Find rf in the thread list */
1700 rit = std::find(list->rbegin(), list->rend(), rf);
1701 ASSERT(rit != list->rend());
1703 /* Find the last write/release */
1704 for (; rit != list->rend(); rit++)
1705 if ((*rit)->is_release())
1707 if (rit == list->rend()) {
1708 /* No write-release in this thread */
1709 return true; /* complete */
1711 ModelAction *release = *rit;
1713 ASSERT(rf->same_thread(release));
1715 pending->writes.clear();
1717 bool certain = true;
1718 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1719 if (id_to_int(rf->get_tid()) == (int)i)
1721 list = &(*thrd_lists)[i];
1723 /* Can we ensure no future writes from this thread may break
1724 * the release seq? */
1725 bool future_ordered = false;
1727 ModelAction *last = get_last_action(int_to_id(i));
1728 Thread *th = get_thread(int_to_id(i));
1729 if ((last && rf->happens_before(last)) ||
1732 future_ordered = true;
1734 ASSERT(!th->is_model_thread() || future_ordered);
1736 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1737 const ModelAction *act = *rit;
1738 /* Reach synchronization -> this thread is complete */
1739 if (act->happens_before(release))
1741 if (rf->happens_before(act)) {
1742 future_ordered = true;
1746 /* Only non-RMW writes can break release sequences */
1747 if (!act->is_write() || act->is_rmw())
1750 /* Check modification order */
1751 if (mo_graph->checkReachable(rf, act)) {
1752 /* rf --mo--> act */
1753 future_ordered = true;
1756 if (mo_graph->checkReachable(act, release))
1757 /* act --mo--> release */
1759 if (mo_graph->checkReachable(release, act) &&
1760 mo_graph->checkReachable(act, rf)) {
1761 /* release --mo-> act --mo--> rf */
1762 return true; /* complete */
1764 /* act may break release sequence */
1765 pending->writes.push_back(act);
1768 if (!future_ordered)
1769 certain = false; /* This thread is uncertain */
1773 release_heads->push_back(release);
1774 pending->writes.clear();
1776 pending->release = release;
1783 * A public interface for getting the release sequence head(s) with which a
1784 * given ModelAction must synchronize. This function only returns a non-empty
1785 * result when it can locate a release sequence head with certainty. Otherwise,
1786 * it may mark the internal state of the ModelChecker so that it will handle
1787 * the release sequence at a later time, causing @a act to update its
1788 * synchronization at some later point in execution.
1789 * @param act The 'acquire' action that may read from a release sequence
1790 * @param release_heads A pass-by-reference return parameter. Will be filled
1791 * with the head(s) of the release sequence(s), if they exists with certainty.
1792 * @see ModelChecker::release_seq_heads
1794 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1796 const ModelAction *rf = act->get_reads_from();
1797 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1798 sequence->acquire = act;
1800 if (!release_seq_heads(rf, release_heads, sequence)) {
1801 /* add act to 'lazy checking' list */
1802 pending_rel_seqs->push_back(sequence);
1804 snapshot_free(sequence);
1809 * Attempt to resolve all stashed operations that might synchronize with a
1810 * release sequence for a given location. This implements the "lazy" portion of
1811 * determining whether or not a release sequence was contiguous, since not all
1812 * modification order information is present at the time an action occurs.
1814 * @param location The location/object that should be checked for release
1815 * sequence resolutions. A NULL value means to check all locations.
1816 * @param work_queue The work queue to which to add work items as they are
1818 * @return True if any updates occurred (new synchronization, new mo_graph
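// Sketch (illustrative): each stashed release_seq holds an acquire that read
// from rf. Once enough modification order is known, release_seq_heads()
// either completes (the acquire synchronizes with every head and the entry is
// freed) or the entry stays pending for a later call.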
1821 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1823 bool updated = false;
1824 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1825 while (it != pending_rel_seqs->end()) {
1826 struct release_seq *pending = *it;
1827 ModelAction *act = pending->acquire;
1829 /* Only resolve sequences on the given location, if provided */
1830 if (location && act->get_location() != location) {
1835 const ModelAction *rf = act->get_reads_from();
1836 rel_heads_list_t release_heads;
1838 complete = release_seq_heads(rf, &release_heads, pending);
1839 for (unsigned int i = 0; i < release_heads.size(); i++) {
1840 if (!act->has_synchronized_with(release_heads[i])) {
1841 if (act->synchronize_with(release_heads[i]))
1844 set_bad_synchronization();
1849 /* Re-check all pending release sequences */
1850 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1851 /* Re-check act for mo_graph edges */
1852 work_queue->push_back(MOEdgeWorkEntry(act));
1854 /* propagate synchronization to later actions */
1855 action_list_t::reverse_iterator rit = action_trace->rbegin();
1856 for (; (*rit) != act; rit++) {
1857 ModelAction *propagate = *rit;
1858 if (act->happens_before(propagate)) {
1859 propagate->synchronize_with(act);
1860 /* Re-check 'propagate' for mo_graph edges */
1861 work_queue->push_back(MOEdgeWorkEntry(propagate));
1866 it = pending_rel_seqs->erase(it);
1867 snapshot_free(pending);
1873 // If we resolved promises or data races, see if we have realized a data race.
1880 * Performs various bookkeeping operations for the current ModelAction. For
1881 * instance, adds action to the per-object, per-thread action vector and to the
1882 * action trace list of all thread actions.
1884 * @param act is the ModelAction to add.
1886 void ModelChecker::add_action_to_lists(ModelAction *act)
1888 int tid = id_to_int(act->get_tid());
1889 action_trace->push_back(act);
1891 get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
1893 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1894 if (tid >= (int)vec->size())
1895 vec->resize(priv->next_thread_id);
1896 (*vec)[tid].push_back(act);
1898 if ((int)thrd_last_action->size() <= tid)
1899 thrd_last_action->resize(get_num_threads());
1900 (*thrd_last_action)[tid] = act;
1902 if (act->is_wait()) {
1903 void *mutex_loc=(void *) act->get_value();
1904 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1906 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1907 if (tid >= (int)vec->size())
1908 vec->resize(priv->next_thread_id);
1909 (*vec)[tid].push_back(act);
1911 if ((int)thrd_last_action->size() <= tid)
1912 thrd_last_action->resize(get_num_threads());
1913 (*thrd_last_action)[tid] = act;
1918 * @brief Get the last action performed by a particular Thread
1919 * @param tid The thread ID of the Thread in question
1920 * @return The last action in the thread
1922 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1924 int threadid = id_to_int(tid);
1925 if (threadid < (int)thrd_last_action->size())
1926 return (*thrd_last_action)[id_to_int(tid)];
1932 * Gets the last memory_order_seq_cst write (in the total global sequence)
1933 * performed on a particular object (i.e., memory location), not including the
1935 * @param curr The current ModelAction; also denotes the object location to
1937 * @return The last seq_cst write
1939 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1941 void *location = curr->get_location();
1942 action_list_t *list = get_safe_ptr_action(obj_map, location);
1943 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1944 action_list_t::reverse_iterator rit;
1945 for (rit = list->rbegin(); rit != list->rend(); rit++)
1946 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1952 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1953 * location). This function identifies the mutex according to the current
1954 * action, which is presumed to operate on the same mutex.
1955 * @param curr The current ModelAction; also denotes the object location to
1957 * @return The last unlock operation
1959 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1961 void *location = curr->get_location();
1962 action_list_t *list = get_safe_ptr_action(obj_map, location);
1963 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1964 action_list_t::reverse_iterator rit;
1965 for (rit = list->rbegin(); rit != list->rend(); rit++)
1966 if ((*rit)->is_unlock() || (*rit)->is_wait())
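/**
 * @brief Get the "parent" action of a thread
 * @param tid The thread in question
 * @return The thread's most recent action, or, if it has none yet, the
 * THREAD_CREATE action that created it
 */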
1971 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1973 ModelAction *parent = get_last_action(tid);
1975 parent = get_thread(tid)->get_creation();
1980 * Returns the clock vector for a given thread.
1981 * @param tid The thread whose clock vector we want
1982 * @return Desired clock vector
1984 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1986 return get_parent_action(tid)->get_cv();
1990 * Resolve a set of Promises with a current write. The set is provided in the
1991 * Node corresponding to @a write.
1992 * @param write The ModelAction that is fulfilling Promises
1993 * @return True if promises were resolved; false otherwise
1995 bool ModelChecker::resolve_promises(ModelAction *write)
1997 bool resolved = false;
1998 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2000 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2001 Promise *promise = (*promises)[promise_index];
2002 if (write->get_node()->get_promise(i)) {
2003 ModelAction *read = promise->get_action();
2004 if (read->is_rmw()) {
2005 mo_graph->addRMWEdge(write, read);
2007 read->read_from(write);
2008 //First fix up the modification order for actions that happened before this read
2010 r_modification_order(read, write);
2011 //Next fix up the modification order for actions that happened after this read
2013 post_r_modification_order(read, write);
2014 //Make sure the promise's value matches the write's value
2015 ASSERT(promise->get_value() == write->get_value());
2018 promises->erase(promises->begin() + promise_index);
2019 threads_to_check.push_back(read->get_tid());
2026 //Check whether reading these writes has made threads unable to resolve promises
2029 for(unsigned int i=0;i<threads_to_check.size();i++)
2030 mo_check_promises(threads_to_check[i], write);
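/*
 * Sketch of the dual-index loop above (assuming the elided non-matching
 * branch simply advances promise_index): 'i' indexes the promise set as it
 * looked when the write's Node recorded its bits, while 'promise_index'
 * tracks the live 'promises' vector, which shrinks on erase. For promises
 * {P0, P1, P2} with Node bits {1, 0, 1}:
 *
 *   i=0: bit set   -> resolve and erase P0 (promise_index stays 0)
 *   i=1: bit clear -> skip P1              (promise_index becomes 1)
 *   i=2: bit set   -> resolve and erase P2 (found at promise_index 1)
 */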
2036 * Compute the set of promises that could potentially be satisfied by this
2037 * action. Note that the set computation actually appears in the Node, not in the ModelChecker.
2039 * @param curr The ModelAction that may satisfy promises
2041 void ModelChecker::compute_promises(ModelAction *curr)
2043 for (unsigned int i = 0; i < promises->size(); i++) {
2044 Promise *promise = (*promises)[i];
2045 const ModelAction *act = promise->get_action();
2046 if (!act->happens_before(curr) &&
2048 !act->could_synchronize_with(curr) &&
2049 !act->same_thread(curr) &&
2050 act->get_location() == curr->get_location() &&
2051 promise->get_value() == curr->get_value()) {
2052 curr->get_node()->set_promise(i, act->is_rmw());
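/*
 * Illustration of the filter above (hypothetical trace): with a pending
 * promise whose speculative read is
 *
 *   T1: r = x.load(memory_order_relaxed);      // 'act', promised value 42
 *
 * a later, unsynchronized store in another thread
 *
 *   T2: x.store(42, memory_order_relaxed);     // 'curr'
 *
 * satisfies each of the tests above: different thread, same location,
 * matching value, and neither happens-before nor a possible synchronization
 * with the read, so the corresponding promise bit is set on curr's Node.
 */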
2057 /** Checks promises in response to a change in a thread's ClockVector. */
2058 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2060 for (unsigned int i = 0; i < promises->size(); i++) {
2061 Promise *promise = (*promises)[i];
2062 const ModelAction *act = promise->get_action();
2063 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2064 merge_cv->synchronized_since(act)) {
2065 if (promise->increment_threads(tid)) {
2066 //Promise has failed
2067 failed_promise = true;
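/*
 * Sketch: the test above fires exactly when thread 'tid' newly synchronizes
 * with the promised read 'act' (old_cv did not cover act, merge_cv does).
 * Such a thread can no longer supply the promised value, because the read
 * cannot read from a write that it happens before; increment_threads()
 * records the exclusion and reports failure once every candidate thread has
 * been ruled out.
 */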
2074 void ModelChecker::check_promises_thread_disabled() {
2075 for (unsigned int i = 0; i < promises->size(); i++) {
2076 Promise *promise = (*promises)[i];
2077 if (promise->check_promise()) {
2078 failed_promise = true;
2084 /** Checks promises in response to an addition to the modification order.
2086 * pthread is the thread that performed the read that created the promise
2088 * pread is the read that created the promise
2090 * pwrite is either the first write to the same location as pread by
2091 * pthread that is sequenced after pread, or the write read by the
2092 * first read to the same location as pread by pthread that is
2093 * sequenced after pread.
2095 * 1. If tid=pthread, then we check what other threads are reachable
2096 * through the modification order starting with pwrite. Those threads cannot
2097 * perform a write that will resolve the promise due to modification
2098 * order constraints.
2100 * 2. If the tid is not pthread, we check whether pwrite can reach the
2101 * action write through the modification order. If so, that thread
2102 * cannot perform a future write that will resolve the promise due to
2103 * modification order constraints.
2105 * @param tid The thread that either read from the model action
2106 * write, or actually did the model action write.
2108 * @param write The ModelAction representing the relevant write.
2111 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2112 void * location = write->get_location();
2113 for (unsigned int i = 0; i < promises->size(); i++) {
2114 Promise *promise = (*promises)[i];
2115 const ModelAction *act = promise->get_action();
2117 //Is this promise on the same location?
2118 if ( act->get_location() != location )
2121 //same thread as the promise
2122 if ( act->get_tid()==tid ) {
2124 //if we do not yet have a pwrite for this promise, set it
2125 if (promise->get_write() == NULL ) {
2126 promise->set_write(write);
2127 //The pwrite cannot happen before the promise
2128 if (write->happens_before(act) && (write != act)) {
2129 failed_promise = true;
2133 if (mo_graph->checkPromise(write, promise)) {
2134 failed_promise = true;
2139 //Don't do any lookups twice for the same thread
2140 if (promise->has_sync_thread(tid))
2143 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2144 if (promise->increment_threads(tid)) {
2145 failed_promise = true;
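/*
 * Worked scenario (illustration only) for the two cases documented above.
 * Suppose thread P issued the promised read 'pread' of location x, and the
 * first subsequent access to x by P fixes 'pwrite':
 *
 *   Case 1 (tid == P): mo_graph->checkPromise(write, promise) asks which
 *   other threads already have writes reachable from pwrite in the
 *   modification order; such threads can no longer produce the promised
 *   value, and if that rules out every candidate the promise has failed.
 *
 *   Case 2 (tid != P): if pwrite reaches 'write' in the modification order,
 *   then every later write by 'tid' is also mo-after pwrite, so 'tid' is
 *   excluded via increment_threads(); when no candidate threads remain,
 *   failed_promise is set.
 */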
2153 * Compute the set of writes that may break the current pending release
2154 * sequence. This information is extracted from previous release sequence calculations.
2157 * @param curr The current ModelAction. Must be a release sequence fixup action.
2160 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2162 if (pending_rel_seqs->empty())
2165 struct release_seq *pending = pending_rel_seqs->back();
2166 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2167 const ModelAction *write = pending->writes[i];
2168 curr->get_node()->add_relseq_break(write);
2171 /* NULL means don't break the sequence; just synchronize */
2172 curr->get_node()->add_relseq_break(NULL);
2176 * Build up an initial set of all past writes that this 'read' action may read
2177 * from. This set is determined by the clock vector's "happens before" relation.
2179 * @param curr is the current ModelAction that we are exploring; it must be a read.
2182 void ModelChecker::build_reads_from_past(ModelAction *curr)
2184 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2186 ASSERT(curr->is_read());
2188 ModelAction *last_seq_cst = NULL;
2190 /* Track whether this object has been initialized */
2191 bool initialized = false;
2193 if (curr->is_seqcst()) {
2194 last_seq_cst = get_last_seq_cst(curr);
2195 /* We have to at least see the last sequentially consistent write,
2196 so we are initialized. */
2197 if (last_seq_cst != NULL)
2201 /* Iterate over all threads */
2202 for (i = 0; i < thrd_lists->size(); i++) {
2203 /* Iterate over actions in thread, starting from most recent */
2204 action_list_t *list = &(*thrd_lists)[i];
2205 action_list_t::reverse_iterator rit;
2206 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2207 ModelAction *act = *rit;
2209 /* Only consider 'write' actions */
2210 if (!act->is_write() || act == curr)
2213 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2214 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
2215 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2216 DEBUG("Adding action to may_read_from:\n");
2217 if (DBG_ENABLED()) {
2221 curr->get_node()->add_read_from(act);
2225 /* Include at most one act per-thread that "happens before" curr */
2226 if (act->happens_before(curr)) {
2234 assert_bug("May read from uninitialized atomic");
2236 if (DBG_ENABLED() || !initialized) {
2237 model_print("Reached read action:\n");
2239 model_print("Printing may_read_from\n");
2240 curr->get_node()->print_may_read_from();
2241 model_print("End printing may_read_from\n");
2245 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2247 Node *prevnode=write->get_node()->get_parent();
2249 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2250 if (write->is_release()&&thread_sleep)
2252 if (!write->is_rmw()) {
2255 if (write->get_reads_from()==NULL)
2257 write=write->get_reads_from();
2261 static void print_list(action_list_t *list, int exec_num = -1)
2263 action_list_t::iterator it;
2265 model_print("---------------------------------------------------------------------\n");
2267 model_print("Execution %d:\n", exec_num);
2269 unsigned int hash=0;
2271 for (it = list->begin(); it != list->end(); it++) {
2273 hash=hash^(hash<<3)^((*it)->hash());
2275 model_print("HASH %u\n", hash);
2276 model_print("---------------------------------------------------------------------\n");
2279 #if SUPPORT_MOD_ORDER_DUMP
2280 void ModelChecker::dumpGraph(char *filename) {
2282 sprintf(buffer, "%s.dot",filename);
2283 FILE *file=fopen(buffer, "w");
2284 fprintf(file, "digraph %s {\n",filename);
2285 mo_graph->dumpNodes(file);
2286 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2288 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2289 ModelAction *action=*it;
2290 if (action->is_read()) {
2291 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2292 if (action->get_reads_from()!=NULL)
2293 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2295 if (thread_array[action->get_tid()] != NULL) {
2296 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2299 thread_array[action->get_tid()]=action;
2301 fprintf(file,"}\n");
2302 model_free(thread_array);
2307 /** @brief Prints an execution trace summary. */
2308 void ModelChecker::print_summary() const
2310 #if SUPPORT_MOD_ORDER_DUMP
2312 char buffername[100];
2313 sprintf(buffername, "exec%04u", stats.num_total);
2314 mo_graph->dumpGraphToFile(buffername);
2315 sprintf(buffername, "graph%04u", stats.num_total);
2316 dumpGraph(buffername);
2319 if (!isfinalfeasible())
2320 model_print("INFEASIBLE EXECUTION!\n");
2321 print_list(action_trace, stats.num_total);
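/*
 * Usage note (sketch): when built with SUPPORT_MOD_ORDER_DUMP, each execution
 * leaves Graphviz output; dumpGraph() above writes files named like
 * graph0001.dot, and mo_graph->dumpGraphToFile() emits the modification order
 * on its own. Assuming the standard Graphviz 'dot' tool is installed, a dump
 * can be rendered with:
 *
 *   dot -Tpng graph0001.dot -o graph0001.png
 *
 * In the resulting image, red "rf" edges are reads-from and blue "sb" edges
 * are the per-thread sequenced-before chains drawn by dumpGraph().
 */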
2326 * Add a Thread to the system for the first time. Should only be called once per thread.
2328 * @param t The Thread to add
2330 void ModelChecker::add_thread(Thread *t)
2332 thread_map->put(id_to_int(t->get_id()), t);
2333 scheduler->add_thread(t);
2337 * Removes a thread from the scheduler.
2338 * @param t The Thread to remove.
2340 void ModelChecker::remove_thread(Thread *t)
2342 scheduler->remove_thread(t);
2346 * @brief Get a Thread reference by its ID
2347 * @param tid The Thread's ID
2348 * @return A Thread reference
2350 Thread * ModelChecker::get_thread(thread_id_t tid) const
2352 return thread_map->get(id_to_int(tid));
2356 * @brief Get a reference to the Thread in which a ModelAction was executed
2357 * @param act The ModelAction
2358 * @return A Thread reference
2360 Thread * ModelChecker::get_thread(ModelAction *act) const
2362 return get_thread(act->get_tid());
2366 * @brief Check if a Thread is currently enabled
2367 * @param t The Thread to check
2368 * @return True if the Thread is currently enabled
2370 bool ModelChecker::is_enabled(Thread *t) const
2372 return scheduler->is_enabled(t);
2376 * @brief Check if a Thread is currently enabled
2377 * @param tid The ID of the Thread to check
2378 * @return True if the Thread is currently enabled
2380 bool ModelChecker::is_enabled(thread_id_t tid) const
2382 return scheduler->is_enabled(tid);
2386 * Switch from a user-context to the "master thread" context (a.k.a. system
2387 * context). This switch is made with the intention of exploring a particular
2388 * model-checking action (described by a ModelAction object). Must be called
2389 * from a user-thread context.
2391 * @param act The current action that will be explored. May be NULL only if
2392 * trace is exiting via an assertion (see ModelChecker::set_assert and
2393 * ModelChecker::has_asserted).
2394 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2396 int ModelChecker::switch_to_master(ModelAction *act)
2399 Thread *old = thread_current();
2400 set_current_action(act);
2401 old->set_state(THREAD_READY);
2402 return Thread::swap(old, &system_context);
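/*
 * Sketch of a typical call site (hypothetical; the argument list follows the
 * pattern of the MODEL_FIXUP_RELSEQ construction in take_step() below): a
 * user-context wrapper for an atomic load would package the operation as a
 * ModelAction and trap into the checker with something like
 *
 *   switch_to_master(new ModelAction(ATOMIC_READ, order, obj, VALUE_NONE));
 *
 * The master context then explores that action and eventually swaps this
 * thread back in.
 */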
2406 * Takes the next step in the execution, if possible.
2407 * @return Returns true (success) if a step was taken and false otherwise.
2409 bool ModelChecker::take_step() {
2413 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2415 if (curr->get_state() == THREAD_READY) {
2416 ASSERT(priv->current_action);
2418 priv->nextThread = check_current_action(priv->current_action);
2419 priv->current_action = NULL;
2421 if (curr->is_blocked() || curr->is_complete())
2422 scheduler->remove_thread(curr);
2427 Thread *next = scheduler->next_thread(priv->nextThread);
2429 /* Infeasible -> don't take any more steps */
2432 else if (isfeasibleprefix() && have_bug_reports()) {
2437 if (params.bound != 0) {
2438 if (priv->used_sequence_numbers > params.bound) {
2443 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2444 next ? id_to_int(next->get_id()) : -1);
2447 * Launch end-of-execution release sequence fixups only when there are:
2449 * (1) no more user threads to run (or when execution replay chooses
2450 * the 'model_thread')
2451 * (2) pending release sequences
2452 * (3) pending assertions (i.e., data races)
2453 * (4) no pending promises
2455 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2456 isfinalfeasible() && !unrealizedraces.empty()) {
2457 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2458 pending_rel_seqs->size());
2459 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2460 std::memory_order_seq_cst, NULL, VALUE_NONE,
2462 set_current_action(fixup);
2466 /* next == NULL -> don't take any more steps */
2470 next->set_state(THREAD_RUNNING);
2472 if (next->get_pending() != NULL) {
2473 /* restart a pending action */
2474 set_current_action(next->get_pending());
2475 next->set_pending(NULL);
2476 next->set_state(THREAD_READY);
2480 /* Return false only if swap fails with an error */
2481 return (Thread::swap(&system_context, next) == 0);
2484 /** Runs the current execution until there are no more steps to take. */
2485 void ModelChecker::finish_execution() {
2488 while (take_step());