10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
29 ~bug_message() { if (msg) snapshot_free(msg); }
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
49 failed_promise(false),
50 too_many_reads(false),
51 bad_synchronization(false),
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
61 ModelAction *current_action;
62 unsigned int next_thread_id;
63 modelclock_t used_sequence_numbers;
64 ModelAction *next_backtrack;
65 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
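/*
 * Hedged sketch of the get-or-create pattern the two helpers above
 * appear to implement (the NULL check and insertion are elided in this
 * excerpt); "hash" and "ptr" are the parameters shown above:
 *
 *   action_list_t *tmp = hash->get(ptr);
 *   if (tmp == NULL) {
 *           tmp = new action_list_t();
 *           hash->put(ptr, tmp);
 *   }
 *   return tmp;
 */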
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
162 snapshotObject->backTrackBeforeStep(0);
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
168 return priv->next_thread_id++;
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
174 return priv->next_thread_id;
177 /** @return The currently executing Thread. */
178 Thread * ModelChecker::get_current_thread() const
180 return scheduler->get_current_thread();
183 /** @return a sequence number for a new ModelAction */
184 modelclock_t ModelChecker::get_next_seq_num()
186 return ++priv->used_sequence_numbers;
189 Node * ModelChecker::get_curr_node() const
191 return node_stack->get_head();
195 * @brief Choose the next thread to execute.
197 * This function chooses the next thread that should execute. It can force the
198 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
199 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
200 * The model-checker may have no preference regarding the next thread (i.e.,
201 * when exploring a new execution ordering), in which case this will return NULL.
203 * @param curr The current ModelAction. This action might guide the choice of the next thread.
205 * @return The next thread to run. If the model-checker has no preference, NULL.
207 Thread * ModelChecker::get_next_thread(ModelAction *curr)
212 /* Do not split atomic actions. */
214 return thread_current();
215 /* The THREAD_CREATE action points to the created Thread */
216 else if (curr->get_type() == THREAD_CREATE)
217 return (Thread *)curr->get_location();
220 /* Have we completed exploring the preselected path? */
224 /* Else, we are trying to replay an execution */
225 ModelAction *next = node_stack->get_next()->get_action();
227 if (next == diverge) {
228 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
229 earliest_diverge = diverge;
231 Node *nextnode = next->get_node();
232 Node *prevnode = nextnode->get_parent();
233 scheduler->update_sleep_set(prevnode);
235 /* Reached divergence point */
236 if (nextnode->increment_misc()) {
237 /* The next node will try to satisfy a different misc_index value. */
238 tid = next->get_tid();
239 node_stack->pop_restofstack(2);
240 } else if (nextnode->increment_promise()) {
241 /* The next node will try to satisfy a different set of promises. */
242 tid = next->get_tid();
243 node_stack->pop_restofstack(2);
244 } else if (nextnode->increment_read_from()) {
245 /* The next node will read from a different value. */
246 tid = next->get_tid();
247 node_stack->pop_restofstack(2);
248 } else if (nextnode->increment_future_value()) {
249 /* The next node will try to read from a different future value. */
250 tid = next->get_tid();
251 node_stack->pop_restofstack(2);
252 } else if (nextnode->increment_relseq_break()) {
253 /* The next node will try to resolve a release sequence differently */
254 tid = next->get_tid();
255 node_stack->pop_restofstack(2);
257 /* Make a different thread execute for the next step */
258 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
259 tid = prevnode->get_next_backtrack();
260 /* Make sure the backtracked thread isn't sleeping. */
261 node_stack->pop_restofstack(1);
262 if (diverge == earliest_diverge) {
263 earliest_diverge = prevnode->get_action();
266 /* The correct sleep set is in the parent node. */
269 DEBUG("*** Divergence point ***\n");
273 tid = next->get_tid();
275 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
276 ASSERT(tid != THREAD_ID_T_NONE);
277 return thread_map->get(id_to_int(tid));
281 * We need to know what the next actions of all threads in the sleep
282 * set will be. This method computes them and stores each action in
283 * the corresponding thread object's pending-action slot.
286 void ModelChecker::execute_sleep_set()
288 for (unsigned int i = 0; i < get_num_threads(); i++) {
289 thread_id_t tid = int_to_id(i);
290 Thread *thr = get_thread(tid);
291 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
292 thr->set_state(THREAD_RUNNING);
293 scheduler->next_thread(thr);
294 Thread::swap(&system_context, thr);
295 priv->current_action->set_sleep_flag();
296 thr->set_pending(priv->current_action);
301 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
303 for (unsigned int i = 0; i < get_num_threads(); i++) {
304 Thread *thr = get_thread(int_to_id(i));
305 if (scheduler->is_sleep_set(thr)) {
306 ModelAction *pending_act = thr->get_pending();
307 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
308 //Remove this thread from sleep set
309 scheduler->remove_sleep(thr);
314 /** @brief Alert the model-checker that an incorrectly-ordered
315 * synchronization was made */
316 void ModelChecker::set_bad_synchronization()
318 priv->bad_synchronization = true;
321 bool ModelChecker::has_asserted() const
323 return priv->asserted;
326 void ModelChecker::set_assert()
328 priv->asserted = true;
332 * Check if we are in a deadlock. Should only be called at the end of an
333 * execution, although it should not give false positives in the middle of an
334 * execution (there should be some ENABLED thread).
336 * @return True if program is in a deadlock; false otherwise
338 bool ModelChecker::is_deadlocked() const
340 bool blocking_threads = false;
341 for (unsigned int i = 0; i < get_num_threads(); i++) {
342 thread_id_t tid = int_to_id(i);
345 Thread *t = get_thread(tid);
346 if (!t->is_model_thread() && t->get_pending())
347 blocking_threads = true;
349 return blocking_threads;
353 * Check if this is a complete execution. That is, have all threads completed
354 * execution (rather than exiting because sleep sets have forced a redundant execution)?
357 * @return True if the execution is complete.
359 bool ModelChecker::is_complete_execution() const
361 for (unsigned int i = 0; i < get_num_threads(); i++)
362 if (is_enabled(int_to_id(i)))
368 * @brief Assert a bug in the executing program.
370 * Use this function to assert any sort of bug in the user program. If the
371 * current trace is feasible (actually, a prefix of some feasible execution),
372 * then this execution will be aborted, printing the appropriate message. If
373 * the current trace is not yet feasible, the error message will be stashed and
374 * printed if the execution ever becomes feasible.
376 * @param msg Descriptive message for the bug (do not include newline char)
377 * @return True if bug is immediately-feasible
379 bool ModelChecker::assert_bug(const char *msg)
381 priv->bugs.push_back(new bug_message(msg));
383 if (isfeasibleprefix()) {
391 * @brief Assert a bug in the executing program, asserted by a user thread
392 * @see ModelChecker::assert_bug
393 * @param msg Descriptive message for the bug (do not include newline char)
395 void ModelChecker::assert_user_bug(const char *msg)
397 /* If feasible bug, bail out now */
399 switch_to_master(NULL);
402 /** @return True, if any bugs have been reported for this execution */
403 bool ModelChecker::have_bug_reports() const
405 return priv->bugs.size() != 0;
408 /** @brief Print bug report listing for this execution (if any bugs exist) */
409 void ModelChecker::print_bugs() const
411 if (have_bug_reports()) {
412 model_print("Bug report: %zu bug%s detected\n",
414 priv->bugs.size() > 1 ? "s" : "");
415 for (unsigned int i = 0; i < priv->bugs.size(); i++)
416 priv->bugs[i]->print();
421 * @brief Record end-of-execution stats
423 * Must be run when exiting an execution. Records various stats.
424 * @see struct execution_stats
426 void ModelChecker::record_stats()
429 if (!isfeasibleprefix())
430 stats.num_infeasible++;
431 else if (have_bug_reports())
432 stats.num_buggy_executions++;
433 else if (is_complete_execution())
434 stats.num_complete++;
436 stats.num_redundant++;
439 /** @brief Print execution stats */
440 void ModelChecker::print_stats() const
442 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
443 model_print("Number of redundant executions: %d\n", stats.num_redundant);
444 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
445 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
446 model_print("Total executions: %d\n", stats.num_total);
447 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
451 * @brief End-of-execution print
452 * @param printbugs Should any existing bugs be printed?
454 void ModelChecker::print_execution(bool printbugs) const
456 print_program_output();
458 if (DBG_ENABLED() || params.verbose) {
459 model_print("Earliest divergence point since last feasible execution:\n");
460 if (earliest_diverge)
461 earliest_diverge->print();
463 model_print("(Not set)\n");
469 /* Don't print invalid bugs */
478 * Queries the model-checker for more executions to explore and, if one
479 * exists, resets the model-checker state to execute a new execution.
481 * @return If there are more executions to explore, return true. Otherwise, return false.
484 bool ModelChecker::next_execution()
487 /* Is this execution a feasible execution that's worth bug-checking? */
488 bool complete = isfeasibleprefix() && (is_complete_execution() ||
491 /* End-of-execution bug checks */
494 assert_bug("Deadlock detected");
502 if (DBG_ENABLED() || params.verbose || have_bug_reports())
503 print_execution(complete);
505 clear_program_output();
508 earliest_diverge = NULL;
510 if ((diverge = get_next_backtrack()) == NULL)
514 model_print("Next execution will diverge at:\n");
518 reset_to_initial_state();
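/*
 * Hedged sketch of how a driver loop is assumed to use next_execution():
 * re-run the user program until no unexplored backtracking points remain.
 * "run_one_execution" is a hypothetical placeholder for executing the
 * user program once; "model" is the global ModelChecker instance used
 * elsewhere in this file.
 *
 *   do {
 *           run_one_execution();
 *   } while (model->next_execution());
 */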
522 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 switch (act->get_type()) {
529 /* Optimization: relaxed operations don't need backtracking */
530 if (act->is_relaxed())
532 /* linear search: from most recent to oldest */
533 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
534 action_list_t::reverse_iterator rit;
535 for (rit = list->rbegin(); rit != list->rend(); rit++) {
536 ModelAction *prev = *rit;
537 if (prev->could_synchronize_with(act))
543 case ATOMIC_TRYLOCK: {
544 /* linear search: from most recent to oldest */
545 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
546 action_list_t::reverse_iterator rit;
547 for (rit = list->rbegin(); rit != list->rend(); rit++) {
548 ModelAction *prev = *rit;
549 if (act->is_conflicting_lock(prev))
554 case ATOMIC_UNLOCK: {
555 /* linear search: from most recent to oldest */
556 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
557 action_list_t::reverse_iterator rit;
558 for (rit = list->rbegin(); rit != list->rend(); rit++) {
559 ModelAction *prev = *rit;
560 if (!act->same_thread(prev)&&prev->is_failed_trylock())
566 /* linear search: from most recent to oldest */
567 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
568 action_list_t::reverse_iterator rit;
569 for (rit = list->rbegin(); rit != list->rend(); rit++) {
570 ModelAction *prev = *rit;
571 if (!act->same_thread(prev)&&prev->is_failed_trylock())
573 if (!act->same_thread(prev)&&prev->is_notify())
579 case ATOMIC_NOTIFY_ALL:
580 case ATOMIC_NOTIFY_ONE: {
581 /* linear search: from most recent to oldest */
582 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
583 action_list_t::reverse_iterator rit;
584 for (rit = list->rbegin(); rit != list->rend(); rit++) {
585 ModelAction *prev = *rit;
586 if (!act->same_thread(prev)&&prev->is_wait())
597 /** This method finds backtracking points at which we should try to
598 * reorder other actions against the given ModelAction.
600 * @param act The ModelAction to find backtracking points for.
602 void ModelChecker::set_backtracking(ModelAction *act)
604 Thread *t = get_thread(act);
605 ModelAction *prev = get_last_conflict(act);
609 Node *node = prev->get_node()->get_parent();
611 int low_tid, high_tid;
612 if (node->is_enabled(t)) {
613 low_tid = id_to_int(act->get_tid());
614 high_tid = low_tid + 1;
617 high_tid = get_num_threads();
620 for(int i = low_tid; i < high_tid; i++) {
621 thread_id_t tid = int_to_id(i);
623 /* Make sure this thread can be enabled here. */
624 if (i >= node->get_num_threads())
627 /* Don't backtrack into a point where the thread is disabled or sleeping. */
628 if (node->enabled_status(tid) != THREAD_ENABLED)
631 /* Check if this has been explored already */
632 if (node->has_been_explored(tid))
635 /* See if fairness allows */
636 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
638 for (int t = 0; t < node->get_num_threads(); t++) {
639 thread_id_t tother = int_to_id(t);
640 if (node->is_enabled(tother) && node->has_priority(tother)) {
648 /* Cache the latest backtracking point */
649 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
650 priv->next_backtrack = prev;
652 /* If this is a new backtracking point, mark the tree */
653 if (!node->set_backtrack(tid))
655 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
656 id_to_int(prev->get_tid()),
657 id_to_int(t->get_id()));
666 * Returns last backtracking point. The model checker will explore a different
667 * path for this point in the next execution.
668 * @return The ModelAction at which the next execution should diverge.
670 ModelAction * ModelChecker::get_next_backtrack()
672 ModelAction *next = priv->next_backtrack;
673 priv->next_backtrack = NULL;
678 * Processes a read or rmw model action.
679 * @param curr is the read model action to process.
680 * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
681 * @return True if processing this read updates the mo_graph.
683 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
685 uint64_t value = VALUE_NONE;
686 bool updated = false;
688 const ModelAction *reads_from = curr->get_node()->get_read_from();
689 if (reads_from != NULL) {
690 mo_graph->startChanges();
692 value = reads_from->get_value();
693 bool r_status = false;
695 if (!second_part_of_rmw) {
696 check_recency(curr, reads_from);
697 r_status = r_modification_order(curr, reads_from);
701 if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
702 mo_graph->rollbackChanges();
703 priv->too_many_reads = false;
707 read_from(curr, reads_from);
708 mo_graph->commitChanges();
709 mo_check_promises(curr->get_tid(), reads_from);
712 } else if (!second_part_of_rmw) {
713 /* Read from future value */
714 value = curr->get_node()->get_future_value();
715 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
716 curr->set_read_from(NULL);
717 Promise *valuepromise = new Promise(curr, value, expiration);
718 promises->push_back(valuepromise);
720 get_thread(curr)->set_return_value(value);
726 * Processes a lock, trylock, unlock, wait, or notify model action.
727 * @param curr is the mutex model action to process.
729 * The trylock operation checks whether the lock is taken. If not,
730 * it falls through to the normal lock operation case. If so, it returns failure.
733 * The lock operation has already been checked that it is enabled, so
734 * it just grabs the lock and synchronizes with the previous unlock.
736 * The unlock operation has to re-enable all of the threads that are
737 * waiting on the lock.
739 * @return True if synchronization was updated; false otherwise
741 bool ModelChecker::process_mutex(ModelAction *curr)
743 std::mutex *mutex = NULL;
744 struct std::mutex_state *state = NULL;
746 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
747 mutex = (std::mutex *)curr->get_location();
748 state = mutex->get_state();
749 } else if(curr->is_wait()) {
750 mutex = (std::mutex *)curr->get_value();
751 state = mutex->get_state();
754 switch (curr->get_type()) {
755 case ATOMIC_TRYLOCK: {
756 bool success = !state->islocked;
757 curr->set_try_lock(success);
759 get_thread(curr)->set_return_value(0);
762 get_thread(curr)->set_return_value(1);
764 //otherwise fall into the lock case
766 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
767 assert_bug("Lock access before initialization");
768 state->islocked = true;
769 ModelAction *unlock = get_last_unlock(curr);
770 //synchronize with the previous unlock statement
771 if (unlock != NULL) {
772 curr->synchronize_with(unlock);
777 case ATOMIC_UNLOCK: {
779 state->islocked = false;
780 //wake up the other threads
781 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
782 //activate all the waiting threads
783 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
784 scheduler->wake(get_thread(*rit));
791 state->islocked = false;
792 //wake up the other threads
793 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
794 //activate all the waiting threads
795 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
796 scheduler->wake(get_thread(*rit));
799 //check whether we should go to sleep or not...simulate spurious failures
800 if (curr->get_node()->get_misc() == 0) {
801 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
803 scheduler->sleep(get_current_thread());
807 case ATOMIC_NOTIFY_ALL: {
808 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
809 //activate all the waiting threads
810 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
811 scheduler->wake(get_thread(*rit));
816 case ATOMIC_NOTIFY_ONE: {
817 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
818 int wakeupthread = curr->get_node()->get_misc();
819 action_list_t::iterator it = waiters->begin();
820 advance(it, wakeupthread);
821 scheduler->wake(get_thread(*it));
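/*
 * Hedged user-level sketch of the mutex operations modeled above, using
 * the std::mutex wrapper this checker interposes on (method names are an
 * assumption; only the std::mutex type and its state appear in this
 * excerpt):
 *
 *   std::mutex m;
 *   m.lock();                 // ATOMIC_LOCK: synchronizes with the last unlock
 *   bool ok = m.try_lock();   // ATOMIC_TRYLOCK: reports failure while m is held
 *   m.unlock();               // ATOMIC_UNLOCK: wakes threads waiting on m
 */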
833 * Process a write ModelAction
834 * @param curr The ModelAction to process
835 * @return True if the mo_graph was updated or promises were resolved
837 bool ModelChecker::process_write(ModelAction *curr)
839 bool updated_mod_order = w_modification_order(curr);
840 bool updated_promises = resolve_promises(curr);
842 if (promises->size() == 0) {
843 for (unsigned int i = 0; i < futurevalues->size(); i++) {
844 struct PendingFutureValue pfv = (*futurevalues)[i];
845 //Do more ambitious checks now that mo is more complete
846 if (mo_may_allow(pfv.writer, pfv.act)&&
847 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
848 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
849 priv->next_backtrack = pfv.act;
851 futurevalues->resize(0);
854 mo_graph->commitChanges();
855 mo_check_promises(curr->get_tid(), curr);
857 get_thread(curr)->set_return_value(VALUE_NONE);
858 return updated_mod_order || updated_promises;
862 * Process a fence ModelAction
863 * @param curr The ModelAction to process
864 * @return True if synchronization was updated
866 bool ModelChecker::process_fence(ModelAction *curr)
869 * fence-relaxed: no-op
870 * fence-release: only log the occurrence (not in this function), for
871 * use in later synchronization
872 * fence-acquire (this function): search for hypothetical release sequences
875 bool updated = false;
876 if (curr->is_acquire()) {
877 action_list_t *list = action_trace;
878 action_list_t::reverse_iterator rit;
879 /* Find X : is_read(X) && X --sb-> curr */
880 for (rit = list->rbegin(); rit != list->rend(); rit++) {
881 ModelAction *act = *rit;
884 if (act->get_tid() != curr->get_tid())
886 /* Stop at the beginning of the thread */
887 if (act->is_thread_start())
889 /* Stop once we reach a prior fence-acquire */
890 if (act->is_fence() && act->is_acquire())
894 /* read-acquire will find its own release sequences */
895 if (act->is_acquire())
898 /* Establish hypothetical release sequences */
899 rel_heads_list_t release_heads;
900 get_release_seq_heads(curr, act, &release_heads);
901 for (unsigned int i = 0; i < release_heads.size(); i++)
902 if (!curr->synchronize_with(release_heads[i]))
903 set_bad_synchronization();
904 if (release_heads.size() != 0)
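/*
 * Hedged user-level sketch of the fence-acquire pattern handled above
 * (standard C++11 semantics; variable names are illustrative only):
 *
 *   // Thread 1                                 // Thread 2
 *   data = 42;                                  while (!flag.load(std::memory_order_relaxed))
 *   flag.store(1, std::memory_order_release);           ;
 *                                               std::atomic_thread_fence(std::memory_order_acquire);
 *                                               assert(data == 42);
 *
 * The relaxed load in Thread 2's loop is the prior read "X" found by the
 * scan above; the acquire fence then synchronizes with the release heads
 * of the store that X read from.
 */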
912 * @brief Process the current action for thread-related activity
914 * Performs current-action processing for a THREAD_* ModelAction. Processing
915 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
916 * synchronization, etc. This function is a no-op for non-THREAD actions
917 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
919 * @param curr The current action
920 * @return True if synchronization was updated or a thread completed
922 bool ModelChecker::process_thread_action(ModelAction *curr)
924 bool updated = false;
926 switch (curr->get_type()) {
927 case THREAD_CREATE: {
928 Thread *th = (Thread *)curr->get_location();
929 th->set_creation(curr);
933 Thread *blocking = (Thread *)curr->get_location();
934 ModelAction *act = get_last_action(blocking->get_id());
935 curr->synchronize_with(act);
936 updated = true; /* trigger rel-seq checks */
939 case THREAD_FINISH: {
940 Thread *th = get_thread(curr);
941 while (!th->wait_list_empty()) {
942 ModelAction *act = th->pop_wait_list();
943 scheduler->wake(get_thread(act));
946 updated = true; /* trigger rel-seq checks */
950 check_promises(curr->get_tid(), NULL, curr->get_cv());
961 * @brief Process the current action for release sequence fixup activity
963 * Performs model-checker release sequence fixups for the current action,
964 * forcing a single pending release sequence to break (with a given, potential
965 * "loose" write) or to complete (i.e., synchronize). If a pending release
966 * sequence forms a complete release sequence, then we must perform the fixup
967 * synchronization, mo_graph additions, etc.
969 * @param curr The current action; must be a release sequence fixup action
970 * @param work_queue The work queue to which to add work items as they are
973 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
975 const ModelAction *write = curr->get_node()->get_relseq_break();
976 struct release_seq *sequence = pending_rel_seqs->back();
977 pending_rel_seqs->pop_back();
979 ModelAction *acquire = sequence->acquire;
980 const ModelAction *rf = sequence->rf;
981 const ModelAction *release = sequence->release;
985 ASSERT(release->same_thread(rf));
989 * @todo Forcing a synchronization requires that we set
990 * modification order constraints. For instance, we can't allow
991 * a fixup sequence in which two separate read-acquire
992 * operations read from the same sequence, where the first one
993 * synchronizes and the other doesn't. Essentially, we can't
994 * allow any writes to insert themselves between 'release' and
998 /* Must synchronize */
999 if (!acquire->synchronize_with(release)) {
1000 set_bad_synchronization();
1003 /* Re-check all pending release sequences */
1004 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1005 /* Re-check act for mo_graph edges */
1006 work_queue->push_back(MOEdgeWorkEntry(acquire));
1008 /* propagate synchronization to later actions */
1009 action_list_t::reverse_iterator rit = action_trace->rbegin();
1010 for (; (*rit) != acquire; rit++) {
1011 ModelAction *propagate = *rit;
1012 if (acquire->happens_before(propagate)) {
1013 propagate->synchronize_with(acquire);
1014 /* Re-check 'propagate' for mo_graph edges */
1015 work_queue->push_back(MOEdgeWorkEntry(propagate));
1019 /* Break release sequence with new edges:
1020 * release --mo--> write --mo--> rf */
1021 mo_graph->addEdge(release, write);
1022 mo_graph->addEdge(write, rf);
1025 /* See if we have realized a data race */
1030 * Initialize the current action by performing one or more of the following
1031 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1032 * in the NodeStack, manipulating backtracking sets, allocating and
1033 * initializing clock vectors, and computing the promises to fulfill.
1035 * @param curr The current action, as passed from the user context; may be
1036 * freed/invalidated after the execution of this function, with a different
1037 * action "returned" its place (pass-by-reference)
1038 * @return True if curr is a newly-explored action; false otherwise
1040 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1042 ModelAction *newcurr;
1044 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1045 newcurr = process_rmw(*curr);
1048 if (newcurr->is_rmw())
1049 compute_promises(newcurr);
1055 (*curr)->set_seq_number(get_next_seq_num());
1057 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1059 /* First restore type and order in case of RMW operation */
1060 if ((*curr)->is_rmwr())
1061 newcurr->copy_typeandorder(*curr);
1063 ASSERT((*curr)->get_location() == newcurr->get_location());
1064 newcurr->copy_from_new(*curr);
1066 /* Discard duplicate ModelAction; use action from NodeStack */
1069 /* Always compute new clock vector */
1070 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1073 return false; /* Action was explored previously */
1077 /* Always compute new clock vector */
1078 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1080 /* Assign most recent release fence */
1081 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1084 * Perform one-time actions when pushing new ModelAction onto the NodeStack
1087 if (newcurr->is_write())
1088 compute_promises(newcurr);
1089 else if (newcurr->is_relseq_fixup())
1090 compute_relseq_breakwrites(newcurr);
1091 else if (newcurr->is_wait())
1092 newcurr->get_node()->set_misc_max(2);
1093 else if (newcurr->is_notify_one()) {
1094 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1096 return true; /* This was a new ModelAction */
1101 * @brief Establish reads-from relation between two actions
1103 * Perform basic operations involved with establishing a concrete rf relation,
1104 * including setting the ModelAction data and checking for release sequences.
1106 * @param act The action that is reading (must be a read)
1107 * @param rf The action from which we are reading (must be a write)
1109 * @return True if this read established synchronization
1111 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1113 act->set_read_from(rf);
1114 if (rf != NULL && act->is_acquire()) {
1115 rel_heads_list_t release_heads;
1116 get_release_seq_heads(act, act, &release_heads);
1117 int num_heads = release_heads.size();
1118 for (unsigned int i = 0; i < release_heads.size(); i++)
1119 if (!act->synchronize_with(release_heads[i])) {
1120 set_bad_synchronization();
1123 return num_heads > 0;
1129 * @brief Check whether a model action is enabled.
1131 * Checks whether a lock or join operation would be successful (i.e., is the
1132 * lock already locked, or is the joined thread already complete). If not, put
1133 * the action in a waiter list.
1135 * @param curr is the ModelAction to check whether it is enabled.
1136 * @return a bool that indicates whether the action is enabled.
1138 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1139 if (curr->is_lock()) {
1140 std::mutex *lock = (std::mutex *)curr->get_location();
1141 struct std::mutex_state *state = lock->get_state();
1142 if (state->islocked) {
1143 //Stick the action in the appropriate waiting queue
1144 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1147 } else if (curr->get_type() == THREAD_JOIN) {
1148 Thread *blocking = (Thread *)curr->get_location();
1149 if (!blocking->is_complete()) {
1150 blocking->push_wait_list(curr);
1159 * Stores the ModelAction for the current thread action. Call this
1160 * immediately before switching from user- to system-context to pass
1161 * data between them.
1162 * @param act The ModelAction created by the user-thread action
1164 void ModelChecker::set_current_action(ModelAction *act) {
1165 priv->current_action = act;
1169 * This is the heart of the model checker routine. It performs model-checking
1170 * actions corresponding to a given "current action." Among other processes, it
1171 * calculates reads-from relationships, updates synchronization clock vectors,
1172 * forms a memory_order constraints graph, and handles replay/backtrack
1173 * execution when running permutations of previously-observed executions.
1175 * @param curr The current action to process
1176 * @return The next Thread that must be executed. May be NULL if ModelChecker
1177 * makes no choice (e.g., according to replay execution, combining RMW actions, etc.)
1180 Thread * ModelChecker::check_current_action(ModelAction *curr)
1183 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1185 if (!check_action_enabled(curr)) {
1186 /* Make the execution look like we chose to run this action
1187 * much later, when a lock/join can succeed */
1188 get_current_thread()->set_pending(curr);
1189 scheduler->sleep(get_current_thread());
1190 return get_next_thread(NULL);
1193 bool newly_explored = initialize_curr_action(&curr);
1195 wake_up_sleeping_actions(curr);
1197 /* Add the action to lists before any other model-checking tasks */
1198 if (!second_part_of_rmw)
1199 add_action_to_lists(curr);
1201 /* Build may_read_from set for newly-created actions */
1202 if (newly_explored && curr->is_read())
1203 build_reads_from_past(curr);
1205 /* Initialize work_queue with the "current action" work */
1206 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1207 while (!work_queue.empty() && !has_asserted()) {
1208 WorkQueueEntry work = work_queue.front();
1209 work_queue.pop_front();
1211 switch (work.type) {
1212 case WORK_CHECK_CURR_ACTION: {
1213 ModelAction *act = work.action;
1214 bool update = false; /* update this location's release seq's */
1215 bool update_all = false; /* update all release seq's */
1217 if (process_thread_action(curr))
1220 if (act->is_read() && process_read(act, second_part_of_rmw))
1223 if (act->is_write() && process_write(act))
1226 if (act->is_fence() && process_fence(act))
1229 if (act->is_mutex_op() && process_mutex(act))
1232 if (act->is_relseq_fixup())
1233 process_relseq_fixup(curr, &work_queue);
1236 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1238 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1241 case WORK_CHECK_RELEASE_SEQ:
1242 resolve_release_sequences(work.location, &work_queue);
1244 case WORK_CHECK_MO_EDGES: {
1245 /** @todo Complete verification of work_queue */
1246 ModelAction *act = work.action;
1247 bool updated = false;
1249 if (act->is_read()) {
1250 const ModelAction *rf = act->get_reads_from();
1251 if (rf != NULL && r_modification_order(act, rf))
1254 if (act->is_write()) {
1255 if (w_modification_order(act))
1258 mo_graph->commitChanges();
1261 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1270 check_curr_backtracking(curr);
1271 set_backtracking(curr);
1272 return get_next_thread(curr);
1275 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1277 Node *currnode = curr->get_node();
1278 Node *parnode = currnode->get_parent();
1280 if ((!parnode->backtrack_empty() ||
1281 !currnode->misc_empty() ||
1282 !currnode->read_from_empty() ||
1283 !currnode->future_value_empty() ||
1284 !currnode->promise_empty() ||
1285 !currnode->relseq_break_empty())
1286 && (!priv->next_backtrack ||
1287 *curr > *priv->next_backtrack)) {
1288 priv->next_backtrack = curr;
1292 bool ModelChecker::promises_expired() const
1294 for (unsigned int i = 0; i < promises->size(); i++) {
1295 Promise *promise = (*promises)[i];
1296 if (promise->get_expiration() < priv->used_sequence_numbers)
1303 * This is the strongest feasibility check available.
1304 * @return whether the current trace (partial or complete) must be a prefix of a feasible trace.
1307 bool ModelChecker::isfeasibleprefix() const
1309 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1313 * Returns whether the current completed trace is feasible, except for pending
1314 * release sequences.
1316 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1318 if (DBG_ENABLED() && promises->size() != 0)
1319 DEBUG("Infeasible: unrevolved promises\n");
1321 return !is_infeasible() && promises->size() == 0;
1325 * Check if the current partial trace is infeasible. Does not check any
1326 * end-of-execution flags, which might rule out the execution. Thus, this is
1327 * useful only for ruling an execution as infeasible.
1328 * @return whether the current partial trace is infeasible.
1330 bool ModelChecker::is_infeasible() const
1332 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1333 DEBUG("Infeasible: RMW violation\n");
1335 return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1339 * Check if the current partial trace is infeasible, while ignoring
1340 * infeasibility related to two RMWs reading from the same store. It does not
1341 * check end-of-execution feasibility.
1342 * @see ModelChecker::is_infeasible
1343 * @return whether the current partial trace is infeasible, ignoring multiple
1344 * RMWs reading from the same store.
1346 bool ModelChecker::is_infeasible_ignoreRMW() const
1348 if (DBG_ENABLED()) {
1349 if (mo_graph->checkForCycles())
1350 DEBUG("Infeasible: modification order cycles\n");
1351 if (priv->failed_promise)
1352 DEBUG("Infeasible: failed promise\n");
1353 if (priv->too_many_reads)
1354 DEBUG("Infeasible: too many reads\n");
1355 if (priv->bad_synchronization)
1356 DEBUG("Infeasible: bad synchronization ordering\n");
1357 if (promises_expired())
1358 DEBUG("Infeasible: promises expired\n");
1360 return mo_graph->checkForCycles() || priv->failed_promise ||
1361 priv->too_many_reads || priv->bad_synchronization ||
1365 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1366 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1367 ModelAction *lastread = get_last_action(act->get_tid());
1368 lastread->process_rmw(act);
1369 if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1370 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1371 mo_graph->commitChanges();
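/*
 * Hedged sketch of the RMW split that process_rmw() closes out: a user
 * operation such as
 *
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * is modeled as a read part (ATOMIC_RMWR) followed by a second action
 * that either completes the RMW (ATOMIC_RMW) or converts it to a plain
 * read (ATOMIC_RMWC, presumably when the update is abandoned, e.g. a
 * failed compare-exchange); the second part is merged into the earlier
 * read here, and the RMW atomicity edge is added to mo_graph when a
 * reads-from write exists.
 */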
1377 * Checks whether a thread has read from the same write too many times
1378 * without seeing the effects of a later write.
1381 * 1) there must be a different write that we could read from that would satisfy the modification order,
1382 * 2) we must have read from the same value in excess of maxreads times, and
1383 * 3) that other write must have been in the reads_from set for maxreads times.
1385 * If so, we decide that the execution is no longer feasible.
1387 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1389 if (params.maxreads != 0) {
1390 if (curr->get_node()->get_read_from_size() <= 1)
1392 //Must make sure that execution is currently feasible... We could
1393 //accidentally clear by rolling back
1394 if (is_infeasible())
1396 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1397 int tid = id_to_int(curr->get_tid());
1400 if ((int)thrd_lists->size() <= tid)
1402 action_list_t *list = &(*thrd_lists)[tid];
1404 action_list_t::reverse_iterator rit = list->rbegin();
1405 /* Skip past curr */
1406 for (; (*rit) != curr; rit++)
1408 /* go past curr now */
1411 action_list_t::reverse_iterator ritcopy = rit;
1412 //See if we have enough reads from the same value
1414 for (; count < params.maxreads; rit++,count++) {
1415 if (rit == list->rend())
1417 ModelAction *act = *rit;
1418 if (!act->is_read())
1421 if (act->get_reads_from() != rf)
1423 if (act->get_node()->get_read_from_size() <= 1)
1426 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1428 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1430 /* Need a different write */
1434 /* Test to see whether this is a feasible write to read from*/
1435 mo_graph->startChanges();
1436 r_modification_order(curr, write);
1437 bool feasiblereadfrom = !is_infeasible();
1438 mo_graph->rollbackChanges();
1440 if (!feasiblereadfrom)
1444 bool feasiblewrite = true;
1445 //now we need to see if this write works for everyone
1447 for (int loop = count; loop>0; loop--,rit++) {
1448 ModelAction *act = *rit;
1449 bool foundvalue = false;
1450 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1451 if (act->get_node()->get_read_from_at(j) == write) {
1457 feasiblewrite = false;
1461 if (feasiblewrite) {
1462 priv->too_many_reads = true;
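/*
 * Hedged user-program sketch of the pattern this recency check prunes
 * (assuming a small params.maxreads, e.g. 2):
 *
 *   // Thread 1 (reader)                            // Thread 2 (writer)
 *   while (!flag.load(std::memory_order_relaxed))   flag.store(1, std::memory_order_relaxed);
 *           ;
 *
 * Without the check, the reader could keep reading the initial value of
 * flag forever even though Thread 2's store is available in its
 * may-read-from set on every iteration; after maxreads such reads the
 * trace is marked too_many_reads and discarded as infeasible.
 */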
1470 * Updates the mo_graph with the constraints imposed from the current
1473 * Basic idea is the following: Go through each other thread and find
1474 * the latest action that happened before our read. Two cases:
1476 * (1) The action is a write => that write must either occur before
1477 * the write we read from or be the write we read from.
1479 * (2) The action is a read => the write that that action read from
1480 * must occur before the write we read from or be the same write.
1482 * @param curr The current action. Must be a read.
1483 * @param rf The action that curr reads from. Must be a write.
1484 * @return True if modification order edges were added; false otherwise
1486 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1488 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1491 ASSERT(curr->is_read());
1493 /* Last SC fence in the current thread */
1494 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1496 /* Iterate over all threads */
1497 for (i = 0; i < thrd_lists->size(); i++) {
1498 /* Last SC fence in thread i */
1499 ModelAction *last_sc_fence_thread_local = NULL;
1500 if (int_to_id((int)i) != curr->get_tid())
1501 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1503 /* Last SC fence in thread i, before last SC fence in current thread */
1504 ModelAction *last_sc_fence_thread_before = NULL;
1505 if (last_sc_fence_local)
1506 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1508 /* Iterate over actions in thread, starting from most recent */
1509 action_list_t *list = &(*thrd_lists)[i];
1510 action_list_t::reverse_iterator rit;
1511 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1512 ModelAction *act = *rit;
1514 if (act->is_write() && act != rf && act != curr) {
1515 /* C++, Section 29.3 statement 5 */
1516 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1517 *act < *last_sc_fence_thread_local) {
1518 mo_graph->addEdge(act, rf);
1522 /* C++, Section 29.3 statement 4 */
1523 else if (act->is_seqcst() && last_sc_fence_local &&
1524 *act < *last_sc_fence_local) {
1525 mo_graph->addEdge(act, rf);
1529 /* C++, Section 29.3 statement 6 */
1530 else if (last_sc_fence_thread_before &&
1531 *act < *last_sc_fence_thread_before) {
1532 mo_graph->addEdge(act, rf);
1539 * Include at most one act per-thread that "happens
1540 * before" curr. Don't consider reflexively.
1542 if (act->happens_before(curr) && act != curr) {
1543 if (act->is_write()) {
1545 mo_graph->addEdge(act, rf);
1549 const ModelAction *prevreadfrom = act->get_reads_from();
1550 //if the previous read is unresolved, keep going...
1551 if (prevreadfrom == NULL)
1554 if (rf != prevreadfrom) {
1555 mo_graph->addEdge(prevreadfrom, rf);
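/*
 * Hedged illustration of the two cases above, for a read R of x that
 * reads from write W (rf):
 *
 *   (1) another thread's write V with V --hb--> R        =>  addEdge(V, W)
 *   (2) another thread's read Q with Q --hb--> R, where
 *       Q reads from write V'                            =>  addEdge(V', W) if V' != W
 */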
1567 /** This method fixes up the modification order when we resolve a
1568 * promise. The basic problem is that actions that occur after the
1569 * read curr could not properly add items to the modification order
1572 * So for each thread, we find the earliest item that happens after
1573 * the read curr. This is the item we have to fix up with additional
1574 * constraints. If that action is a write, we add a MO edge between
1575 * the Action rf and that action. If the action is a read, we add a
1576 * MO edge between the Action rf, and whatever the read accessed.
1578 * @param curr is the read ModelAction that we are fixing up MO edges for.
1579 * @param rf is the write ModelAction that curr reads from.
1582 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1584 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1586 ASSERT(curr->is_read());
1588 /* Iterate over all threads */
1589 for (i = 0; i < thrd_lists->size(); i++) {
1590 /* Iterate over actions in thread, starting from most recent */
1591 action_list_t *list = &(*thrd_lists)[i];
1592 action_list_t::reverse_iterator rit;
1593 ModelAction *lastact = NULL;
1595 /* Find last action that happens after curr that is either not curr or a rmw */
1596 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1597 ModelAction *act = *rit;
1598 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1604 /* Include at most one act per-thread that "happens after" curr */
1605 if (lastact != NULL) {
1606 if (lastact== curr) {
1607 //Case 1: The resolved read is a RMW, and we need to make sure
1608 //that the write portion of the RMW is mod ordered after rf
1610 mo_graph->addEdge(rf, lastact);
1611 } else if (lastact->is_read()) {
1612 //Case 2: The resolved read is a normal read and the next
1613 //operation is a read, and we need to make sure the value read
1614 //is mod ordered after rf
1616 const ModelAction *postreadfrom = lastact->get_reads_from();
1617 if (postreadfrom != NULL && rf != postreadfrom)
1618 mo_graph->addEdge(rf, postreadfrom);
1620 //Case 3: The resolved read is a normal read and the next
1621 //operation is a write, and we need to make sure that the
1622 //write is mod ordered after rf
1624 mo_graph->addEdge(rf, lastact);
1632 * Updates the mo_graph with the constraints imposed from the current write.
1634 * Basic idea is the following: Go through each other thread and find
1635 * the latest action that happened before our write. Two cases:
1637 * (1) The action is a write => that write must occur before the current write.
1640 * (2) The action is a read => the write that that action read from
1641 * must occur before the current write.
1643 * This method also handles two other issues:
1645 * (I) Sequential Consistency: Making sure that if the current write is
1646 * seq_cst, it occurs after the previous seq_cst write.
1648 * (II) Sending the write back to non-synchronizing reads.
1650 * @param curr The current action. Must be a write.
1651 * @return True if modification order edges were added; false otherwise
1653 bool ModelChecker::w_modification_order(ModelAction *curr)
1655 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1658 ASSERT(curr->is_write());
1660 if (curr->is_seqcst()) {
1661 /* We have to at least see the last sequentially consistent write,
1662 so we are initialized. */
1663 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1664 if (last_seq_cst != NULL) {
1665 mo_graph->addEdge(last_seq_cst, curr);
1670 /* Last SC fence in the current thread */
1671 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1673 /* Iterate over all threads */
1674 for (i = 0; i < thrd_lists->size(); i++) {
1675 /* Last SC fence in thread i, before last SC fence in current thread */
1676 ModelAction *last_sc_fence_thread_before = NULL;
1677 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1678 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1680 /* Iterate over actions in thread, starting from most recent */
1681 action_list_t *list = &(*thrd_lists)[i];
1682 action_list_t::reverse_iterator rit;
1683 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1684 ModelAction *act = *rit;
1687 * 1) If RMW and it actually read from something, then we
1688 * already have all relevant edges, so just skip to next thread.
1691 * 2) If RMW and it didn't read from anything, we should take
1692 * whatever edge we can get to speed up convergence.
1694 * 3) If normal write, we need to look at earlier actions, so
1695 * continue processing list.
1697 if (curr->is_rmw()) {
1698 if (curr->get_reads_from() != NULL)
1706 /* C++, Section 29.3 statement 7 */
1707 if (last_sc_fence_thread_before && act->is_write() &&
1708 *act < *last_sc_fence_thread_before) {
1709 mo_graph->addEdge(act, curr);
1715 * Include at most one act per-thread that "happens before" curr.
1718 if (act->happens_before(curr)) {
1720 * Note: if act is RMW, just add edge:
1722 * The following edge should be handled elsewhere:
1723 * readfrom(act) --mo--> act
1725 if (act->is_write())
1726 mo_graph->addEdge(act, curr);
1727 else if (act->is_read()) {
1728 //if previous read accessed a null, just keep going
1729 if (act->get_reads_from() == NULL)
1731 mo_graph->addEdge(act->get_reads_from(), curr);
1735 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1736 !act->same_thread(curr)) {
1737 /* We have an action that:
1738 (1) did not happen before us
1739 (2) is a read and we are a write
1740 (3) cannot synchronize with us
1741 (4) is in a different thread
1743 that read could potentially read from our write. Note that
1744 these checks are overly conservative at this point, we'll
1745 do more checks before actually removing the
1749 if (thin_air_constraint_may_allow(curr, act)) {
1750 if (!is_infeasible() ||
1751 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1752 struct PendingFutureValue pfv = {curr,act};
1753 futurevalues->push_back(pfv);
1763 /** Arbitrary reads from the future are not allowed. Section 29.3
1764 * part 9 places some constraints. This method checks one resulting
1765 * constraint. Others require compiler support. */
1766 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1768 if (!writer->is_rmw())
1771 if (!reader->is_rmw())
1774 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1775 if (search == reader)
1777 if (search->get_tid() == reader->get_tid() &&
1778 search->happens_before(reader))
1786 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1787 * some constraints. This method checks the following constraint (others
1788 * require compiler support):
1790 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1792 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1794 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1796 /* Iterate over all threads */
1797 for (i = 0; i < thrd_lists->size(); i++) {
1798 const ModelAction *write_after_read = NULL;
1800 /* Iterate over actions in thread, starting from most recent */
1801 action_list_t *list = &(*thrd_lists)[i];
1802 action_list_t::reverse_iterator rit;
1803 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1804 ModelAction *act = *rit;
1806 /* Don't disallow due to act == reader */
1807 if (!reader->happens_before(act) || reader == act)
1809 else if (act->is_write())
1810 write_after_read = act;
1811 else if (act->is_read() && act->get_reads_from() != NULL)
1812 write_after_read = act->get_reads_from();
1815 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
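/*
 * Hedged worked example of the constraint "if X --hb--> Y --mo--> Z, then
 * X should not read from Z", on a single atomic location x (relaxed
 * accesses, illustrative names):
 *
 *   // Thread 1                // Thread 2
 *   r = x.load();   // X       x.store(2);   // Z
 *   x.store(1);     // Y
 *
 * X --sb--> Y gives X --hb--> Y; if mo_graph already orders Y before Z,
 * then sending Z's value back to X as a future value would contradict the
 * modification order, so the pair is rejected here.
 */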
1822 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1823 * The ModelAction under consideration is expected to be taking part in
1824 * release/acquire synchronization as an object of the "reads from" relation.
1825 * Note that this can only provide release sequence support for RMW chains
1826 * which do not read from the future, as those actions cannot be traced until
1827 * their "promise" is fulfilled. Similarly, we may not even establish the
1828 * presence of a release sequence with certainty, as some modification order
1829 * constraints may be decided further in the future. Thus, this function
1830 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1831 * and a boolean representing certainty.
1833 * @param rf The action that might be part of a release sequence. Must be a write.
1835 * @param release_heads A pass-by-reference style return parameter. After
1836 * execution of this function, release_heads will contain the heads of all the
1837 * relevant release sequences, if any exist with certainty
1838 * @param pending A pass-by-reference style return parameter which is only used
1839 * when returning false (i.e., uncertain). Returns most information regarding
1840 * an uncertain release sequence, including any write operations that might
1841 * break the sequence.
1842 * @return true, if the ModelChecker is certain that release_heads is complete; false otherwise
1845 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1846 rel_heads_list_t *release_heads,
1847 struct release_seq *pending) const
1849 /* Only check for release sequences if there are no cycles */
1850 if (mo_graph->checkForCycles())
1854 ASSERT(rf->is_write());
1856 if (rf->is_release())
1857 release_heads->push_back(rf);
1858 else if (rf->get_last_fence_release())
1859 release_heads->push_back(rf->get_last_fence_release());
1861 break; /* End of RMW chain */
1863 /** @todo Need to be smarter here... In the linux lock
1864 * example, this will run to the beginning of the program for
1866 /** @todo The way to be smarter here is to keep going until 1
1867 * thread has a release preceded by an acquire and you've seen
1870 /* acq_rel RMW is a sufficient stopping condition */
1871 if (rf->is_acquire() && rf->is_release())
1872 return true; /* complete */
1874 rf = rf->get_reads_from();
1877 /* read from future: need to settle this later */
1879 return false; /* incomplete */
1882 if (rf->is_release())
1883 return true; /* complete */
1885 /* else relaxed write
1886 * - check for fence-release in the same thread (29.8, stmt. 3)
1887 * - check modification order for contiguous subsequence
1888 * -> rf must be same thread as release */
1890 const ModelAction *fence_release = rf->get_last_fence_release();
1891 /* Synchronize with a fence-release unconditionally; we don't need to
1892 * find any more "contiguous subsequence..." for it */
1894 release_heads->push_back(fence_release);
1896 int tid = id_to_int(rf->get_tid());
1897 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1898 action_list_t *list = &(*thrd_lists)[tid];
1899 action_list_t::const_reverse_iterator rit;
1901 /* Find rf in the thread list */
1902 rit = std::find(list->rbegin(), list->rend(), rf);
1903 ASSERT(rit != list->rend());
1905 /* Find the last {write,fence}-release */
1906 for (; rit != list->rend(); rit++) {
1907 if (fence_release && *(*rit) < *fence_release)
1909 if ((*rit)->is_release())
1912 if (rit == list->rend()) {
1913 /* No write-release in this thread */
1914 return true; /* complete */
1915 } else if (fence_release && *(*rit) < *fence_release) {
1916 /* The fence-release is more recent (and so, "stronger") than
1917 * the most recent write-release */
1918 return true; /* complete */
1919 } /* else, need to establish contiguous release sequence */
1920 ModelAction *release = *rit;
1922 ASSERT(rf->same_thread(release));
1924 pending->writes.clear();
1926 bool certain = true;
1927 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1928 if (id_to_int(rf->get_tid()) == (int)i)
1930 list = &(*thrd_lists)[i];
1932 /* Can we ensure no future writes from this thread may break
1933 * the release seq? */
1934 bool future_ordered = false;
1936 ModelAction *last = get_last_action(int_to_id(i));
1937 Thread *th = get_thread(int_to_id(i));
1938 if ((last && rf->happens_before(last)) ||
1941 future_ordered = true;
1943 ASSERT(!th->is_model_thread() || future_ordered);
1945 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1946 const ModelAction *act = *rit;
1947 /* Reach synchronization -> this thread is complete */
1948 if (act->happens_before(release))
1950 if (rf->happens_before(act)) {
1951 future_ordered = true;
1955 /* Only non-RMW writes can break release sequences */
1956 if (!act->is_write() || act->is_rmw())
1959 /* Check modification order */
1960 if (mo_graph->checkReachable(rf, act)) {
1961 /* rf --mo--> act */
1962 future_ordered = true;
1965 if (mo_graph->checkReachable(act, release))
1966 /* act --mo--> release */
1968 if (mo_graph->checkReachable(release, act) &&
1969 mo_graph->checkReachable(act, rf)) {
1970 /* release --mo-> act --mo--> rf */
1971 return true; /* complete */
1973 /* act may break release sequence */
1974 pending->writes.push_back(act);
1977 if (!future_ordered)
1978 certain = false; /* This thread is uncertain */
1982 release_heads->push_back(release);
1983 pending->writes.clear();
1985 pending->release = release;
1992 * An interface for getting the release sequence head(s) with which a
1993 * given ModelAction must synchronize. This function only returns a non-empty
1994 * result when it can locate a release sequence head with certainty. Otherwise,
1995 * it may mark the internal state of the ModelChecker so that it will handle
1996 * the release sequence at a later time, causing @a acquire to update its
1997 * synchronization at some later point in execution.
1999 * @param acquire The 'acquire' action that may synchronize with a release sequence
2001 * @param read The read action that may read from a release sequence; this may
2002 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2003 * when 'acquire' is a fence-acquire)
2004 * @param release_heads A pass-by-reference return parameter. Will be filled
2005 * with the head(s) of the release sequence(s), if they exists with certainty.
2006 * @see ModelChecker::release_seq_heads
2008 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2009 ModelAction *read, rel_heads_list_t *release_heads)
2011 const ModelAction *rf = read->get_reads_from();
2012 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2013 sequence->acquire = acquire;
2014 sequence->read = read;
2016 if (!release_seq_heads(rf, release_heads, sequence)) {
2017 /* add act to 'lazy checking' list */
2018 pending_rel_seqs->push_back(sequence);
2020 snapshot_free(sequence);
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!acquire->has_synchronized_with(release_heads[i])) {
				if (acquire->synchronize_with(release_heads[i]))
					updated = true;
				else
					set_bad_synchronization();
			}
		}

		if (updated) {
			/* Re-check all pending release sequences */
			work_queue->push_back(CheckRelSeqWorkEntry(NULL));
			/* Re-check read-acquire for mo_graph edges */
			if (acquire->is_read())
				work_queue->push_back(MOEdgeWorkEntry(acquire));

			/* propagate synchronization to later actions */
			action_list_t::reverse_iterator rit = action_trace->rbegin();
			for (; (*rit) != acquire; rit++) {
				ModelAction *propagate = *rit;
				if (acquire->happens_before(propagate)) {
					propagate->synchronize_with(acquire);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));
				}
			}
		}
		if (complete) {
			it = pending_rel_seqs->erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}
	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds the action to the per-object, per-thread action vector and to
 * the action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
void ModelChecker::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
	if (list->empty() && act->is_atomic_var()) {
		uninit = new_uninitialized_action(act->get_location());
		uninit_id = id_to_int(uninit->get_tid());
		list->push_back(uninit);
	}
	list->push_back(act);

	action_trace->push_back(act);
	if (uninit)
		action_trace->push_front(uninit);

	std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;
	if (uninit)
		(*thrd_last_action)[uninit_id] = uninit;

	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release->size() <= tid)
			thrd_last_fence_release->resize(get_num_threads());
		(*thrd_last_fence_release)[tid] = act;
	}

	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);

		std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
/**
 * @brief Get the last action performed by a particular Thread
 * @param tid The thread ID of the Thread in question
 * @return The last action in the thread
 */
ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
{
	int threadid = id_to_int(tid);
	if (threadid < (int)thrd_last_action->size())
		return (*thrd_last_action)[threadid];
	else
		return NULL;
}

/**
 * @brief Get the last fence release performed by a particular Thread
 * @param tid The thread ID of the Thread in question
 * @return The last fence release in the thread, if one exists; NULL otherwise
 */
ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
{
	int threadid = id_to_int(tid);
	if (threadid < (int)thrd_last_fence_release->size())
		return (*thrd_last_fence_release)[threadid];
	else
		return NULL;
}
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last seq_cst write
 */
ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
{
	void *location = curr->get_location();
	action_list_t *list = get_safe_ptr_action(obj_map, location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
			return *rit;
	return NULL;
}

/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
 */
ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
	/* All fences should have NULL location */
	action_list_t *list = get_safe_ptr_action(obj_map, NULL);
	action_list_t::reverse_iterator rit = list->rbegin();

	if (before_fence) {
		for (; rit != list->rend(); rit++)
			if (*rit == before_fence)
				break;

		ASSERT(*rit == before_fence);
		rit++;
	}

	for (; rit != list->rend(); rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}

/**
 * Gets the last unlock operation performed on a particular mutex (i.e., memory
 * location). This function identifies the mutex according to the current
 * action, which is presumed to operate on the same mutex.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last unlock operation
 */
ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
{
	void *location = curr->get_location();
	action_list_t *list = get_safe_ptr_action(obj_map, location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_unlock() || (*rit)->is_wait())
			return *rit;
	return NULL;
}
/**
 * @brief Get a thread's most recent action or, if it has not yet acted, the
 * action that created it
 * @param tid The thread ID of the Thread in question
 * @return The thread's last action, falling back to its creation action
 */
ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
{
	ModelAction *parent = get_last_action(tid);
	if (!parent)
		parent = get_thread(tid)->get_creation();
	return parent;
}

/**
 * Returns the clock vector for a given thread.
 * @param tid The thread whose clock vector we want
 * @return Desired clock vector
 */
ClockVector * ModelChecker::get_cv(thread_id_t tid) const
{
	return get_parent_action(tid)->get_cv();
}
/**
 * Resolve a set of Promises with a current write. The set is provided in the
 * Node corresponding to @a write.
 * @param write The ModelAction that is fulfilling Promises
 * @return True if promises were resolved; false otherwise
 */
bool ModelChecker::resolve_promises(ModelAction *write)
{
	bool resolved = false;
	std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;

	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			ModelAction *read = promise->get_action();
			if (read->is_rmw()) {
				mo_graph->addRMWEdge(write, read);
			}
			read_from(read, write);
			//First fix up the modification order for actions that happened
			//before the read
			r_modification_order(read, write);
			//Next fix up the modification order for actions that happened
			//after the read
			post_r_modification_order(read, write);
			//Make sure the promise's value matches the write's value
			ASSERT(promise->get_value() == write->get_value());
			delete promise;

			promises->erase(promises->begin() + promise_index);
			threads_to_check.push_back(read->get_tid());

			resolved = true;
		} else
			promise_index++;
	}

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < threads_to_check.size(); i++)
		mo_check_promises(threads_to_check[i], write);

	return resolved;
}
/**
 * Compute the set of promises that could potentially be satisfied by this
 * action. Note that the set computation actually appears in the Node, not in
 * ModelChecker.
 * @param curr The ModelAction that may satisfy promises
 */
void ModelChecker::compute_promises(ModelAction *curr)
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		if (!act->happens_before(curr) &&
				act->is_read() &&
				!act->could_synchronize_with(curr) &&
				!act->same_thread(curr) &&
				act->get_location() == curr->get_location() &&
				promise->get_value() == curr->get_value()) {
			curr->get_node()->set_promise(i, act->is_rmw());
		}
	}
}
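/*
 * Illustrative sketch (not part of the checker): the load-buffering pattern
 * that promises model. With relaxed ordering, both loads may legally observe
 * writes that appear later in the execution order, so the checker records a
 * Promise for such a speculative read and keeps the execution only if a
 * matching write (as computed above) eventually resolves it.
 *
 *	std::atomic<int> x, y;                   // both initialized to 0
 *
 *	// Thread 1                              // Thread 2
 *	r1 = x.load(std::memory_order_relaxed);  r2 = y.load(std::memory_order_relaxed);
 *	y.store(1, std::memory_order_relaxed);   x.store(1, std::memory_order_relaxed);
 *
 * The outcome r1 == 1 && r2 == 1 is allowed by the C++11 memory model and is
 * reachable only if each load "reads from the future".
 */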
/** Checks promises in response to a change in a thread's ClockVector. */
void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
				merge_cv->synchronized_since(act)) {
			if (promise->increment_threads(tid)) {
				//Promise has failed
				priv->failed_promise = true;
				return;
			}
		}
	}
}

/** Checks promises in response to a thread becoming disabled. */
void ModelChecker::check_promises_thread_disabled() {
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (promise->check_promise()) {
			priv->failed_promise = true;
			return;
		}
	}
}
/** Checks promises in response to an addition to the modification order for
 * a thread.
 *
 * pthread is the thread that performed the read that created the promise
 *
 * pread is the read that created the promise
 *
 * pwrite is either the first write to the same location as pread by
 * pthread that is sequenced after pread, or the value read by the
 * first read to the same location as pread by pthread that is
 * sequenced after pread.
 *
 * 1. If tid = pthread, then we check what other threads are reachable
 * through the modification order starting with pwrite. Those threads cannot
 * perform a write that will resolve the promise due to modification
 * order constraints.
 *
 * 2. If the tid is not pthread, we check whether pwrite can reach the
 * action write through the modification order. If so, that thread
 * cannot perform a future write that will resolve the promise due to
 * modification order constraints.
 *
 * @param tid The thread that either read from the model action
 * write, or actually did the model action write.
 *
 * @param write The ModelAction representing the relevant write.
 */
void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
{
	void *location = write->get_location();
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();

		//Is this promise on the same location?
		if (act->get_location() != location)
			continue;

		//same thread as the promise
		if (act->get_tid() == tid) {
			//do we have a pwrite for the promise? if not, set it
			if (promise->get_write() == NULL) {
				promise->set_write(write);
				//The pwrite cannot happen before the promise
				if (write->happens_before(act) && (write != act)) {
					priv->failed_promise = true;
					return;
				}
			}
			if (mo_graph->checkPromise(write, promise)) {
				priv->failed_promise = true;
				return;
			}
		}

		//Don't do any lookups twice for the same thread
		if (promise->has_sync_thread(tid))
			continue;

		if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
			if (promise->increment_threads(tid)) {
				priv->failed_promise = true;
				return;
			}
		}
	}
}
/**
 * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
 * calculations.
 *
 * @param curr The current ModelAction. Must be a release sequence fixup
 * action.
 */
void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
{
	if (pending_rel_seqs->empty())
		return;

	struct release_seq *pending = pending_rel_seqs->back();
	for (unsigned int i = 0; i < pending->writes.size(); i++) {
		const ModelAction *write = pending->writes[i];
		curr->get_node()->add_relseq_break(write);
	}

	/* NULL means don't break the sequence; just synchronize */
	curr->get_node()->add_relseq_break(NULL);
}
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from. This set is determined by the clock vector's "happens before"
 * relationship.
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelChecker::build_reads_from_past(ModelAction *curr)
{
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				DEBUG("Adding action to may_read_from:\n");
				if (DBG_ENABLED()) {
					act->print();
					curr->print();
				}
				curr->get_node()->add_read_from(act);
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing may_read_from\n");
		curr->get_node()->print_may_read_from();
		model_print("End printing may_read_from\n");
	}
}
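/*
 * Illustrative sketch (not part of the checker): how happens-before prunes
 * may_read_from. In the message-passing pattern below, once the acquire load
 * of 'flag' reads 1, release/acquire synchronization places the store of 42
 * before the load of 'data' in happens-before, so build_reads_from_past()
 * stops at that store and never offers the initial value 0 to the read.
 *
 *	std::atomic<int> data, flag;             // both initialized to 0
 *
 *	// Thread 1
 *	data.store(42, std::memory_order_relaxed);
 *	flag.store(1, std::memory_order_release);
 *
 *	// Thread 2
 *	if (flag.load(std::memory_order_acquire) == 1) {
 *		int r = data.load(std::memory_order_relaxed);  // must read 42, not 0
 *	}
 *
 * The "at most one act per-thread that happens before curr" cutoff above is
 * what enforces this.
 */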
bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
{
	while (true) {
		/* UNINIT actions don't have a Node, and they never sleep */
		if (write->is_uninitialized())
			return true;
		Node *prevnode = write->get_node()->get_parent();

		bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
		if (write->is_release() && thread_sleep)
			return true;
		if (!write->is_rmw()) {
			return false;
		}
		if (write->get_reads_from() == NULL)
			return false;
		write = write->get_reads_from();
	}
}
/**
 * @brief Create a new action representing an uninitialized atomic
 * @param location The memory location of the atomic object
 * @return A pointer to a new ModelAction
 */
ModelAction * ModelChecker::new_uninitialized_action(void *location) const
{
	ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
	act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
	act->create_cv(NULL);
	return act;
}
static void print_list(action_list_t *list, int exec_num = -1)
{
	action_list_t::iterator it;

	model_print("---------------------------------------------------------------------\n");
	if (exec_num >= 0)
		model_print("Execution %d:\n", exec_num);

	unsigned int hash = 0;

	for (it = list->begin(); it != list->end(); it++) {
		(*it)->print();
		hash = hash ^ (hash << 3) ^ ((*it)->hash());
	}
	model_print("HASH %u\n", hash);
	model_print("---------------------------------------------------------------------\n");
}
#if SUPPORT_MOD_ORDER_DUMP
void ModelChecker::dumpGraph(char *filename) const
{
	char buffer[200];
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action = *it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
			if (action->get_reads_from() != NULL)
				fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		}
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
		}

		thread_array[action->get_tid()] = action;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
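/*
 * For reference, an approximate sample of the DOT output produced by
 * dumpGraph() (ignoring the nodes emitted by mo_graph->dumpNodes()) for a
 * trace in which read #2 in thread T2 reads from write #1 in thread T1:
 *
 *	digraph graph0001 {
 *		N2 [label="2, T2"];
 *		N2 -> N1[label="rf", color=red];
 *	}
 *
 * Reads-from edges are red; sequenced-before edges within a thread are blue.
 */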
/** @brief Prints an execution trace summary. */
void ModelChecker::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	char buffername[100];
	sprintf(buffername, "exec%04u", stats.num_total);
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", stats.num_total);
	dumpGraph(buffername);
#endif

	if (!isfeasibleprefix())
		model_print("INFEASIBLE EXECUTION!\n");
	print_list(action_trace, stats.num_total);
	model_print("\n");
}
/**
 * Add a Thread to the system for the first time. Should only be called once
 * per thread.
 * @param t The Thread to add
 */
void ModelChecker::add_thread(Thread *t)
{
	thread_map->put(id_to_int(t->get_id()), t);
	scheduler->add_thread(t);
}

/**
 * Removes a thread from the scheduler.
 * @param t The Thread to remove
 */
void ModelChecker::remove_thread(Thread *t)
{
	scheduler->remove_thread(t);
}

/**
 * @brief Get a Thread reference by its ID
 * @param tid The Thread's ID
 * @return A Thread reference
 */
Thread * ModelChecker::get_thread(thread_id_t tid) const
{
	return thread_map->get(id_to_int(tid));
}

/**
 * @brief Get a reference to the Thread in which a ModelAction was executed
 * @param act The ModelAction
 * @return A Thread reference
 */
Thread * ModelChecker::get_thread(ModelAction *act) const
{
	return get_thread(act->get_tid());
}

/**
 * @brief Check if a Thread is currently enabled
 * @param t The Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelChecker::is_enabled(Thread *t) const
{
	return scheduler->is_enabled(t);
}

/**
 * @brief Check if a Thread is currently enabled
 * @param tid The ID of the Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelChecker::is_enabled(thread_id_t tid) const
{
	return scheduler->is_enabled(tid);
}
/**
 * Switch from a user-context to the "master thread" context (a.k.a. system
 * context). This switch is made with the intention of exploring a particular
 * model-checking action (described by a ModelAction object). Must be called
 * from a user-thread context.
 *
 * @param act The current action that will be explored. May be NULL only if
 * trace is exiting via an assertion (see ModelChecker::set_assert and
 * ModelChecker::has_asserted).
 * @return The value returned by the current action
 */
uint64_t ModelChecker::switch_to_master(ModelAction *act)
{
	DBG();
	Thread *old = thread_current();
	set_current_action(act);
	old->set_state(THREAD_READY);
	if (Thread::swap(old, &system_context) < 0) {
		perror("swap threads");
		exit(EXIT_FAILURE);
	}
	return old->get_return_value();
}
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return True (success) if a step was taken; false otherwise
 */
bool ModelChecker::take_step(ModelAction *curr)
{
	if (has_asserted())
		return false;

	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	Thread *next_thrd = check_current_action(curr);

	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	next_thrd = scheduler->next_thread(next_thrd);

	/* Infeasible -> don't take any more steps */
	if (is_infeasible())
		return false;
	else if (isfeasibleprefix() && have_bug_reports()) {
		set_assert();
		return false;
	}

	if (params.bound != 0) {
		if (priv->used_sequence_numbers > params.bound) {
			return false;
		}
	}

	DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
			next_thrd ? id_to_int(next_thrd->get_id()) : -1);

	/*
	 * Launch end-of-execution release sequence fixups only when there are:
	 *
	 * (1) no more user threads to run (or when execution replay chooses
	 *     the 'model_thread')
	 * (2) pending release sequences
	 * (3) pending assertions (i.e., data races)
	 * (4) no pending promises
	 */
	if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
			is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
		model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
				pending_rel_seqs->size());
		ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
				std::memory_order_seq_cst, NULL, VALUE_NONE,
				model_thread);
		set_current_action(fixup);
		return true;
	}

	/* next_thrd == NULL -> don't take any more steps */
	if (!next_thrd)
		return false;

	next_thrd->set_state(THREAD_RUNNING);

	if (next_thrd->get_pending() != NULL) {
		/* restart a pending action */
		set_current_action(next_thrd->get_pending());
		next_thrd->set_pending(NULL);
		next_thrd->set_state(THREAD_READY);
		return true;
	}

	/* Return false only if swap fails with an error */
	return (Thread::swap(&system_context, next_thrd) == 0);
}
/** Wrapper to run the user's main function, with appropriate arguments */
void user_main_wrapper(void *)
{
	user_main(model->params.argc, model->params.argv);
}

/** @brief Run ModelChecker for the user program */
void ModelChecker::run()
{
	do {
		thrd_t user_thread;
		Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);

		add_thread(t);

		/* Run user thread up to its first action */
		scheduler->next_thread(t);
		Thread::swap(&system_context, t);

		/* Wait for all threads to complete */
		while (take_step(priv->current_action));
	} while (next_execution());
}
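/*
 * For reference, a minimal sketch of the kind of user program this harness
 * drives, assuming the <atomic> and C11-style <threads.h> wrappers used by the
 * checker's test programs. It is illustrative only and not part of this
 * translation unit.
 *
 *	#include <atomic>
 *	#include <threads.h>
 *
 *	std::atomic<int> x;
 *
 *	static void writer(void *arg)
 *	{
 *		x.store(1, std::memory_order_release);
 *	}
 *
 *	int user_main(int argc, char **argv)
 *	{
 *		thrd_t t;
 *		atomic_init(&x, 0);
 *		thrd_create(&t, writer, NULL);
 *		x.load(std::memory_order_acquire);
 *		thrd_join(t);
 *		return 0;
 *	}
 *
 * Each atomic and thread operation traps into the model checker (eventually
 * reaching switch_to_master()), and run() re-executes user_main() once per
 * interleaving chosen by next_execution().
 */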