10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
// Snapshot-allocated holder for one formatted " [BUG] ..." message line.
// The "%s" placeholder in fmt is replaced by str, so strlen(fmt) + strlen(str)
// bytes are enough for the formatted result plus the terminating NUL.
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
// Release the snapshot-allocated buffer, if one was created.
29 ~bug_message() { if (msg) snapshot_free(msg); }
// Emit the stored message verbatim through model-checker output.
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
50 failed_promise(false),
51 too_many_reads(false),
52 bad_synchronization(false),
// Destructor: bug_message objects in 'bugs' are heap-allocated (see
// assert_bug), so they are deleted individually here.
56 ~model_snapshot_members() {
57 for (unsigned int i = 0; i < bugs.size(); i++)
// Action handed from the user context to the model checker (see
// set_current_action / execute_sleep_set).
62 ModelAction *current_action;
// Next thread id to hand out; doubles as the thread count (see get_num_threads).
63 unsigned int next_thread_id;
// Last sequence number issued; get_next_seq_num() pre-increments this.
64 modelclock_t used_sequence_numbers;
// Latest (cached) backtracking point, maintained by set_backtracking().
66 ModelAction *next_backtrack;
// Bug reports collected for this execution (snapshot-allocated storage).
67 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
// Per-execution statistics, folded into totals by record_stats().
68 struct execution_stats stats;
71 /** @brief Incorrectly-ordered synchronization was made */
72 bool bad_synchronization;
78 /** @brief Constructor */
79 ModelChecker::ModelChecker(struct model_params params) :
80 /* Initialize default scheduler */
82 scheduler(new Scheduler()),
84 earliest_diverge(NULL),
85 action_trace(new action_list_t()),
86 thread_map(new HashTable<int, Thread *, int>()),
// Per-location action lists; keys are object addresses.
87 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
// Threads blocked waiting on a mutex, keyed by the mutex address.
88 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
// Threads blocked waiting on a condition variable, keyed by its address.
89 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
90 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
91 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
92 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
93 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
94 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
95 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
96 node_stack(new NodeStack()),
// Small snapshotted members live behind 'priv' (see model_snapshot_members).
97 priv(new struct model_snapshot_members()),
98 mo_graph(new CycleGraph())
100 /* Initialize a model-checker thread, for special ModelActions */
101 model_thread = new Thread(get_next_id());
102 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
105 /** @brief Destructor */
106 ModelChecker::~ModelChecker()
// Delete every Thread object registered in thread_map.
108 for (unsigned int i = 0; i < get_num_threads(); i++)
109 delete thread_map->get(i);
114 delete lock_waiters_map;
115 delete condvar_waiters_map;
// Promises are heap-allocated (see process_read), so free them individually.
118 for (unsigned int i = 0; i < promises->size(); i++)
119 delete (*promises)[i];
122 delete pending_rel_seqs;
124 delete thrd_last_action;
125 delete thrd_last_fence_release;
// Look up the action list for 'ptr' in 'hash', lazily creating (and
// presumably inserting — the insertion is implied by the lazy-init pattern)
// an empty list when none exists yet, so callers always get a valid pointer.
132 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
133 action_list_t * tmp=hash->get(ptr);
135 tmp=new action_list_t();
// Same lazy-initialization idiom as get_safe_ptr_action, but for the
// per-thread vector-of-action-lists table (obj_thrd_map).
141 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
142 std::vector<action_list_t> * tmp=hash->get(ptr);
144 tmp=new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
// Roll the snapshot back to step 0, i.e., the program's initial state.
162 snapshotObject->backTrackBeforeStep(0);
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
// Post-increment: hands out ids 0, 1, 2, ... starting at INITIAL_THREAD_ID.
168 return priv->next_thread_id++;
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
// next_thread_id counts ids already handed out, so it equals the thread count.
174 return priv->next_thread_id;
177 /** @return The currently executing Thread. */
178 Thread * ModelChecker::get_current_thread() const
180 return scheduler->get_current_thread();
183 /** @return a sequence number for a new ModelAction */
184 modelclock_t ModelChecker::get_next_seq_num()
// Pre-increment: sequence numbers start at 1 (0 is the initial value).
186 return ++priv->used_sequence_numbers;
/** @return the current (head) Node of the NodeStack */
189 Node * ModelChecker::get_curr_node() const
191 return node_stack->get_head();
195 * @brief Choose the next thread to execute.
197 * This function chooses the next thread that should execute. It can force the
198 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
199 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
200 * The model-checker may have no preference regarding the next thread (i.e.,
201 * when exploring a new execution ordering), in which case this will return
203 * @param curr The current ModelAction. This action might guide the choice of
205 * @return The next thread to run. If the model-checker has no preference, NULL.
207 Thread * ModelChecker::get_next_thread(ModelAction *curr)
212 /* Do not split atomic actions. */
214 return thread_current();
215 /* The THREAD_CREATE action points to the created Thread */
216 else if (curr->get_type() == THREAD_CREATE)
217 return (Thread *)curr->get_location();
220 /* Have we completed exploring the preselected path? */
224 /* Else, we are trying to replay an execution */
225 ModelAction *next = node_stack->get_next()->get_action();
227 if (next == diverge) {
// Track the earliest divergence point seen since the last feasible execution.
228 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
229 earliest_diverge=diverge;
231 Node *nextnode = next->get_node();
232 Node *prevnode = nextnode->get_parent();
233 scheduler->update_sleep_set(prevnode);
235 /* Reached divergence point */
// Try alternatives at this node in fixed priority order: misc index,
// promise set, read-from choice, future value, then release-seq break.
236 if (nextnode->increment_misc()) {
237 /* The next node will try to satisfy a different misc_index values. */
238 tid = next->get_tid();
239 node_stack->pop_restofstack(2);
240 } else if (nextnode->increment_promise()) {
241 /* The next node will try to satisfy a different set of promises. */
242 tid = next->get_tid();
243 node_stack->pop_restofstack(2);
244 } else if (nextnode->increment_read_from()) {
245 /* The next node will read from a different value. */
246 tid = next->get_tid();
247 node_stack->pop_restofstack(2);
248 } else if (nextnode->increment_future_value()) {
249 /* The next node will try to read from a different future value. */
250 tid = next->get_tid();
251 node_stack->pop_restofstack(2);
252 } else if (nextnode->increment_relseq_break()) {
253 /* The next node will try to resolve a release sequence differently */
254 tid = next->get_tid();
255 node_stack->pop_restofstack(2);
257 /* Make a different thread execute for next step */
258 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
259 tid = prevnode->get_next_backtrack();
260 /* Make sure the backtracked thread isn't sleeping. */
261 node_stack->pop_restofstack(1);
262 if (diverge==earliest_diverge) {
263 earliest_diverge=prevnode->get_action();
266 /* The correct sleep set is in the parent node. */
269 DEBUG("*** Divergence point ***\n");
// Not at the divergence point: keep replaying the recorded schedule.
273 tid = next->get_tid();
275 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
276 ASSERT(tid != THREAD_ID_T_NONE);
277 return thread_map->get(id_to_int(tid));
281 * We need to know what the next actions of all threads in the sleep
282 * set will be. This method computes them and stores the actions at
283 * the corresponding thread object's pending action.
286 void ModelChecker::execute_sleep_set() {
287 for(unsigned int i=0;i<get_num_threads();i++) {
288 thread_id_t tid=int_to_id(i);
289 Thread *thr=get_thread(tid);
290 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
291 thr->set_state(THREAD_RUNNING);
292 scheduler->next_thread(thr);
// Run the sleeping thread one step so it deposits its next action
// into priv->current_action (via set_current_action).
293 Thread::swap(&system_context, thr);
294 priv->current_action->set_sleep_flag();
295 thr->set_pending(priv->current_action);
// Clear the staging slot so stale actions are never reused.
298 priv->current_action = NULL;
/**
 * Remove a thread from the sleep set if the current action could
 * synchronize with that thread's pending action (it may now make progress).
 * @param curr The action that may wake sleeping threads
 */
301 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
303 for (unsigned int i = 0; i < get_num_threads(); i++) {
304 Thread *thr = get_thread(int_to_id(i));
305 if (scheduler->is_sleep_set(thr)) {
306 ModelAction *pending_act = thr->get_pending();
307 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
308 //Remove this thread from sleep set
309 scheduler->remove_sleep(thr);
314 /** @brief Alert the model-checker that an incorrectly-ordered
315 * synchronization was made */
316 void ModelChecker::set_bad_synchronization()
318 priv->bad_synchronization = true;
/** @return True if a model-checker assertion has been raised */
321 bool ModelChecker::has_asserted() const
323 return priv->asserted;
/** @brief Raise the model-checker assertion flag (ends action processing) */
326 void ModelChecker::set_assert()
328 priv->asserted = true;
332 * Check if we are in a deadlock. Should only be called at the end of an
333 * execution, although it should not give false positives in the middle of an
334 * execution (there should be some ENABLED thread).
336 * @return True if program is in a deadlock; false otherwise
338 bool ModelChecker::is_deadlocked() const
340 bool blocking_threads = false;
341 for (unsigned int i = 0; i < get_num_threads(); i++) {
342 thread_id_t tid = int_to_id(i);
345 Thread *t = get_thread(tid);
// A user thread with a pending action is blocked waiting on something.
346 if (!t->is_model_thread() && t->get_pending())
347 blocking_threads = true;
349 return blocking_threads;
353 * Check if this is a complete execution. That is, have all thread completed
354 * execution (rather than exiting because sleep sets have forced a redundant
357 * @return True if the execution is complete.
359 bool ModelChecker::is_complete_execution() const
// Any still-enabled thread means the execution terminated early.
361 for (unsigned int i = 0; i < get_num_threads(); i++)
362 if (is_enabled(int_to_id(i)))
368 * @brief Assert a bug in the executing program.
370 * Use this function to assert any sort of bug in the user program. If the
371 * current trace is feasible (actually, a prefix of some feasible execution),
372 * then this execution will be aborted, printing the appropriate message. If
373 * the current trace is not yet feasible, the error message will be stashed and
374 * printed if the execution ever becomes feasible.
376 * @param msg Descriptive message for the bug (do not include newline char)
377 * @return True if bug is immediately-feasible
379 bool ModelChecker::assert_bug(const char *msg)
// Stash the report; freed in ~model_snapshot_members().
381 priv->bugs.push_back(new bug_message(msg));
383 if (isfeasibleprefix()) {
391 * @brief Assert a bug in the executing program, asserted by a user thread
392 * @see ModelChecker::assert_bug
393 * @param msg Descriptive message for the bug (do not include newline char)
395 void ModelChecker::assert_user_bug(const char *msg)
397 /* If feasible bug, bail out now */
// Yield control back to the model-checker (master) context.
399 switch_to_master(NULL);
402 /** @return True, if any bugs have been reported for this execution */
403 bool ModelChecker::have_bug_reports() const
405 return priv->bugs.size() != 0;
408 /** @brief Print bug report listing for this execution (if any bugs exist) */
409 void ModelChecker::print_bugs() const
411 if (have_bug_reports()) {
412 model_print("Bug report: %zu bug%s detected\n",
414 priv->bugs.size() > 1 ? "s" : "");
415 for (unsigned int i = 0; i < priv->bugs.size(); i++)
416 priv->bugs[i]->print();
421 * @brief Record end-of-execution stats
423 * Must be run when exiting an execution. Records various stats.
424 * @see struct execution_stats
426 void ModelChecker::record_stats()
// Classify the execution into exactly one mutually-exclusive bucket.
429 if (!isfeasibleprefix())
430 stats.num_infeasible++;
431 else if (have_bug_reports())
432 stats.num_buggy_executions++;
433 else if (is_complete_execution())
434 stats.num_complete++;
// Feasible, bug-free, but cut short (e.g., by sleep sets): redundant.
436 stats.num_redundant++;
439 /** @brief Print execution stats */
440 void ModelChecker::print_stats() const
442 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
443 model_print("Number of redundant executions: %d\n", stats.num_redundant);
444 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
445 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
446 model_print("Total executions: %d\n", stats.num_total);
447 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
451 * @brief End-of-execution print
452 * @param printbugs Should any existing bugs be printed?
454 void ModelChecker::print_execution(bool printbugs) const
456 print_program_output();
458 if (DBG_ENABLED() || params.verbose) {
459 model_print("Earliest divergence point since last feasible execution:\n");
460 if (earliest_diverge)
461 earliest_diverge->print();
463 model_print("(Not set)\n");
469 /* Don't print invalid bugs */
478 * Queries the model-checker for more executions to explore and, if one
479 * exists, resets the model-checker state to execute a new execution.
481 * @return If there are more executions to explore, return true. Otherwise,
484 bool ModelChecker::next_execution()
487 /* Is this execution a feasible execution that's worth bug-checking? */
488 bool complete = isfeasibleprefix() && (is_complete_execution() ||
491 /* End-of-execution bug checks */
494 assert_bug("Deadlock detected");
502 if (DBG_ENABLED() || params.verbose || have_bug_reports())
503 print_execution(complete);
// Discard buffered program output that was not printed above.
505 clear_program_output();
508 earliest_diverge = NULL;
// No backtracking point left means the search space is exhausted.
510 if ((diverge = get_next_backtrack()) == NULL)
514 model_print("Next execution will diverge at:\n");
518 reset_to_initial_state();
/**
 * Find the most recent prior action that conflicts with @a act, for use as
 * a backtracking point. The notion of "conflict" depends on the action type
 * (synchronizing accesses, competing locks, failed trylocks, wait/notify).
 * @param act The action to find a conflict for
 */
522 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 switch (act->get_type()) {
529 /* Optimization: relaxed operations don't need backtracking */
530 if (act->is_relaxed())
532 /* linear search: from most recent to oldest */
533 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
534 action_list_t::reverse_iterator rit;
535 for (rit = list->rbegin(); rit != list->rend(); rit++) {
536 ModelAction *prev = *rit;
537 if (prev->could_synchronize_with(act))
543 case ATOMIC_TRYLOCK: {
544 /* linear search: from most recent to oldest */
545 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
546 action_list_t::reverse_iterator rit;
547 for (rit = list->rbegin(); rit != list->rend(); rit++) {
548 ModelAction *prev = *rit;
549 if (act->is_conflicting_lock(prev))
554 case ATOMIC_UNLOCK: {
555 /* linear search: from most recent to oldest */
556 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
557 action_list_t::reverse_iterator rit;
558 for (rit = list->rbegin(); rit != list->rend(); rit++) {
559 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock.
560 if (!act->same_thread(prev)&&prev->is_failed_trylock())
566 /* linear search: from most recent to oldest */
567 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
568 action_list_t::reverse_iterator rit;
569 for (rit = list->rbegin(); rit != list->rend(); rit++) {
570 ModelAction *prev = *rit;
571 if (!act->same_thread(prev)&&prev->is_failed_trylock())
573 if (!act->same_thread(prev)&&prev->is_notify())
579 case ATOMIC_NOTIFY_ALL:
580 case ATOMIC_NOTIFY_ONE: {
581 /* linear search: from most recent to oldest */
582 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
583 action_list_t::reverse_iterator rit;
584 for (rit = list->rbegin(); rit != list->rend(); rit++) {
585 ModelAction *prev = *rit;
// A notify conflicts with another thread's wait on the same object.
586 if (!act->same_thread(prev)&&prev->is_wait())
597 /** This method finds backtracking points where we should try to
598 * reorder the parameter ModelAction against.
600 * @param act The ModelAction to find backtracking points for.
602 void ModelChecker::set_backtracking(ModelAction *act)
604 Thread *t = get_thread(act);
605 ModelAction * prev = get_last_conflict(act);
609 Node * node = prev->get_node()->get_parent();
// If act's own thread was enabled at the conflict node, only that thread
// needs a backtrack point; otherwise consider all threads.
611 int low_tid, high_tid;
612 if (node->is_enabled(t)) {
613 low_tid = id_to_int(act->get_tid());
614 high_tid = low_tid+1;
617 high_tid = get_num_threads();
620 for(int i = low_tid; i < high_tid; i++) {
621 thread_id_t tid = int_to_id(i);
623 /* Make sure this thread can be enabled here. */
624 if (i >= node->get_num_threads())
627 /* Don't backtrack into a point where the thread is disabled or sleeping. */
628 if (node->enabled_status(tid)!=THREAD_ENABLED)
631 /* Check if this has been explored already */
632 if (node->has_been_explored(tid))
635 /* See if fairness allows */
636 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
638 for(int t=0;t<node->get_num_threads();t++) {
639 thread_id_t tother=int_to_id(t);
640 if (node->is_enabled(tother) && node->has_priority(tother)) {
648 /* Cache the latest backtracking point */
649 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
650 priv->next_backtrack = prev;
652 /* If this is a new backtracking point, mark the tree */
653 if (!node->set_backtrack(tid))
655 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
656 id_to_int(prev->get_tid()),
657 id_to_int(t->get_id()));
666 * Returns last backtracking point. The model checker will explore a different
667 * path for this point in the next execution.
668 * @return The ModelAction at which the next execution should diverge.
670 ModelAction * ModelChecker::get_next_backtrack()
// Consume the cached backtracking point (one-shot: reset to NULL).
672 ModelAction *next = priv->next_backtrack;
673 priv->next_backtrack = NULL;
678 * Processes a read or rmw model action.
679 * @param curr is the read model action to process.
680 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
681 * @return True if processing this read updates the mo_graph.
683 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
685 uint64_t value = VALUE_NONE;
686 bool updated = false;
688 const ModelAction *reads_from = curr->get_node()->get_read_from();
689 if (reads_from != NULL) {
// Stage mo_graph changes so they can be rolled back if infeasible.
690 mo_graph->startChanges();
692 value = reads_from->get_value();
693 bool r_status = false;
695 if (!second_part_of_rmw) {
696 check_recency(curr, reads_from);
697 r_status = r_modification_order(curr, reads_from);
// If this read-from choice makes the execution infeasible and another
// choice remains, undo the staged mo_graph edges and retry.
701 if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
702 mo_graph->rollbackChanges();
703 priv->too_many_reads = false;
707 read_from(curr, reads_from);
708 mo_graph->commitChanges();
709 mo_check_promises(curr->get_tid(), reads_from);
712 } else if (!second_part_of_rmw) {
713 /* Read from future value */
714 value = curr->get_node()->get_future_value();
715 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
716 curr->set_read_from(NULL);
// Promise: a later write must eventually supply this value.
717 Promise *valuepromise = new Promise(curr, value, expiration);
718 promises->push_back(valuepromise);
720 get_thread(curr)->set_return_value(value);
726 * Processes a lock, trylock, or unlock model action. @param curr is
727 * the read model action to process.
729 * The try lock operation checks whether the lock is taken. If not,
730 * it falls to the normal lock operation case. If so, it returns
733 * The lock operation has already been checked that it is enabled, so
734 * it just grabs the lock and synchronizes with the previous unlock.
736 * The unlock operation has to re-enable all of the threads that are
737 * waiting on the lock.
739 * @return True if synchronization was updated; false otherwise
741 bool ModelChecker::process_mutex(ModelAction *curr) {
742 std::mutex *mutex=NULL;
743 struct std::mutex_state *state=NULL;
// For lock/trylock/unlock the mutex is the action's location; for wait
// the mutex is passed via the action's value field.
745 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
746 mutex = (std::mutex *)curr->get_location();
747 state = mutex->get_state();
748 } else if(curr->is_wait()) {
749 mutex = (std::mutex *)curr->get_value();
750 state = mutex->get_state();
753 switch (curr->get_type()) {
754 case ATOMIC_TRYLOCK: {
755 bool success = !state->islocked;
756 curr->set_try_lock(success);
758 get_thread(curr)->set_return_value(0);
761 get_thread(curr)->set_return_value(1);
763 //otherwise fall into the lock case
// Detect use of the mutex before its initialization happened-before us.
765 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
766 assert_bug("Lock access before initialization");
767 state->islocked = true;
768 ModelAction *unlock = get_last_unlock(curr);
769 //synchronize with the previous unlock statement
770 if (unlock != NULL) {
771 curr->synchronize_with(unlock);
776 case ATOMIC_UNLOCK: {
778 state->islocked = false;
779 //wake up the other threads
780 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
781 //activate all the waiting threads
782 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
783 scheduler->wake(get_thread(*rit));
// Wait: release the mutex first, then (possibly) go to sleep on the condvar.
790 state->islocked = false;
791 //wake up the other threads
792 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
793 //activate all the waiting threads
794 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
795 scheduler->wake(get_thread(*rit));
798 //check whether we should go to sleep or not...simulate spurious failures
799 if (curr->get_node()->get_misc()==0) {
800 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
802 scheduler->sleep(get_current_thread());
806 case ATOMIC_NOTIFY_ALL: {
807 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
808 //activate all the waiting threads
809 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
810 scheduler->wake(get_thread(*rit));
815 case ATOMIC_NOTIFY_ONE: {
816 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
// The misc index selects which waiter to wake, so all choices get explored.
817 int wakeupthread=curr->get_node()->get_misc();
818 action_list_t::iterator it = waiters->begin();
819 advance(it, wakeupthread);
820 scheduler->wake(get_thread(*it));
832 * Process a write ModelAction
833 * @param curr The ModelAction to process
834 * @return True if the mo_graph was updated or promises were resolved
836 bool ModelChecker::process_write(ModelAction *curr)
838 bool updated_mod_order = w_modification_order(curr);
839 bool updated_promises = resolve_promises(curr);
// Only once all promises are resolved can pending future values be
// re-checked against the (now more complete) modification order.
841 if (promises->size() == 0) {
842 for (unsigned int i = 0; i < futurevalues->size(); i++) {
843 struct PendingFutureValue pfv = (*futurevalues)[i];
844 //Do more ambitious checks now that mo is more complete
845 if (mo_may_allow(pfv.writer, pfv.act)&&
846 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
847 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
848 priv->next_backtrack = pfv.act;
850 futurevalues->resize(0);
853 mo_graph->commitChanges();
854 mo_check_promises(curr->get_tid(), curr);
856 get_thread(curr)->set_return_value(VALUE_NONE);
857 return updated_mod_order || updated_promises;
861 * Process a fence ModelAction
862 * @param curr The ModelAction to process
863 * @return True if synchronization was updated
865 bool ModelChecker::process_fence(ModelAction *curr)
868 * fence-relaxed: no-op
869 * fence-release: only log the occurrence (not in this function), for
870 * use in later synchronization
871 * fence-acquire (this function): search for hypothetical release
874 bool updated = false;
875 if (curr->is_acquire()) {
876 action_list_t *list = action_trace;
877 action_list_t::reverse_iterator rit;
878 /* Find X : is_read(X) && X --sb-> curr */
879 for (rit = list->rbegin(); rit != list->rend(); rit++) {
880 ModelAction *act = *rit;
// Sequenced-before requires same thread as the fence.
883 if (act->get_tid() != curr->get_tid())
885 /* Stop at the beginning of the thread */
886 if (act->is_thread_start())
888 /* Stop once we reach a prior fence-acquire */
889 if (act->is_fence() && act->is_acquire())
893 /* read-acquire will find its own release sequences */
894 if (act->is_acquire())
897 /* Establish hypothetical release sequences */
898 rel_heads_list_t release_heads;
899 get_release_seq_heads(curr, act, &release_heads);
900 for (unsigned int i = 0; i < release_heads.size(); i++)
901 if (!curr->synchronize_with(release_heads[i]))
902 set_bad_synchronization();
903 if (release_heads.size() != 0)
911 * @brief Process the current action for thread-related activity
913 * Performs current-action processing for a THREAD_* ModelAction. Processes
914 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
915 * synchronization, etc. This function is a no-op for non-THREAD actions
916 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
918 * @param curr The current action
919 * @return True if synchronization was updated or a thread completed
921 bool ModelChecker::process_thread_action(ModelAction *curr)
923 bool updated = false;
925 switch (curr->get_type()) {
926 case THREAD_CREATE: {
927 Thread *th = (Thread *)curr->get_location();
928 th->set_creation(curr);
// Join: synchronize with the last action of the thread being joined.
932 Thread *blocking = (Thread *)curr->get_location();
933 ModelAction *act = get_last_action(blocking->get_id());
934 curr->synchronize_with(act);
935 updated = true; /* trigger rel-seq checks */
938 case THREAD_FINISH: {
939 Thread *th = get_thread(curr);
// Wake every thread that was blocked joining on this one.
940 while (!th->wait_list_empty()) {
941 ModelAction *act = th->pop_wait_list();
942 scheduler->wake(get_thread(act));
945 updated = true; /* trigger rel-seq checks */
949 check_promises(curr->get_tid(), NULL, curr->get_cv());
960 * @brief Process the current action for release sequence fixup activity
962 * Performs model-checker release sequence fixups for the current action,
963 * forcing a single pending release sequence to break (with a given, potential
964 * "loose" write) or to complete (i.e., synchronize). If a pending release
965 * sequence forms a complete release sequence, then we must perform the fixup
966 * synchronization, mo_graph additions, etc.
968 * @param curr The current action; must be a release sequence fixup action
969 * @param work_queue The work queue to which to add work items as they are
972 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The chosen "break" write (NULL presumably means: force completion).
974 const ModelAction *write = curr->get_node()->get_relseq_break();
975 struct release_seq *sequence = pending_rel_seqs->back();
976 pending_rel_seqs->pop_back();
978 ModelAction *acquire = sequence->acquire;
979 const ModelAction *rf = sequence->rf;
980 const ModelAction *release = sequence->release;
984 ASSERT(release->same_thread(rf));
988 * @todo Forcing a synchronization requires that we set
989 * modification order constraints. For instance, we can't allow
990 * a fixup sequence in which two separate read-acquire
991 * operations read from the same sequence, where the first one
992 * synchronizes and the other doesn't. Essentially, we can't
993 * allow any writes to insert themselves between 'release' and
997 /* Must synchronize */
998 if (!acquire->synchronize_with(release)) {
999 set_bad_synchronization();
1002 /* Re-check all pending release sequences */
1003 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1004 /* Re-check act for mo_graph edges */
1005 work_queue->push_back(MOEdgeWorkEntry(acquire));
1007 /* propagate synchronization to later actions */
1008 action_list_t::reverse_iterator rit = action_trace->rbegin();
1009 for (; (*rit) != acquire; rit++) {
1010 ModelAction *propagate = *rit;
1011 if (acquire->happens_before(propagate)) {
1012 propagate->synchronize_with(acquire);
1013 /* Re-check 'propagate' for mo_graph edges */
1014 work_queue->push_back(MOEdgeWorkEntry(propagate));
1018 /* Break release sequence with new edges:
1019 * release --mo--> write --mo--> rf */
1020 mo_graph->addEdge(release, write);
1021 mo_graph->addEdge(write, rf);
1024 /* See if we have realized a data race */
1029 * Initialize the current action by performing one or more of the following
1030 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1031 * in the NodeStack, manipulating backtracking sets, allocating and
1032 * initializing clock vectors, and computing the promises to fulfill.
1034 * @param curr The current action, as passed from the user context; may be
1035 * freed/invalidated after the execution of this function, with a different
1036 * action "returned" its place (pass-by-reference)
1037 * @return True if curr is a newly-explored action; false otherwise
1039 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1041 ModelAction *newcurr;
// RMWC/RMW: merge with the earlier RMWR half instead of exploring a new node.
1043 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1044 newcurr = process_rmw(*curr);
1047 if (newcurr->is_rmw())
1048 compute_promises(newcurr);
1054 (*curr)->set_seq_number(get_next_seq_num());
1056 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1058 /* First restore type and order in case of RMW operation */
1059 if ((*curr)->is_rmwr())
1060 newcurr->copy_typeandorder(*curr);
1062 ASSERT((*curr)->get_location() == newcurr->get_location());
1063 newcurr->copy_from_new(*curr);
1065 /* Discard duplicate ModelAction; use action from NodeStack */
1068 /* Always compute new clock vector */
1069 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1072 return false; /* Action was explored previously */
1076 /* Always compute new clock vector */
1077 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1079 /* Assign most recent release fence */
1080 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1083 * Perform one-time actions when pushing new ModelAction onto
1086 if (newcurr->is_write())
1087 compute_promises(newcurr);
1088 else if (newcurr->is_relseq_fixup())
1089 compute_relseq_breakwrites(newcurr);
1090 else if (newcurr->is_wait())
// misc_max = 2 models the binary wake/spurious-fail choice for waits.
1091 newcurr->get_node()->set_misc_max(2);
1092 else if (newcurr->is_notify_one()) {
// One misc choice per current condvar waiter (which waiter gets woken).
1093 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1095 return true; /* This was a new ModelAction */
1100 * @brief Establish reads-from relation between two actions
1102 * Perform basic operations involved with establishing a concrete rf relation,
1103 * including setting the ModelAction data and checking for release sequences.
1105 * @param act The action that is reading (must be a read)
1106 * @param rf The action from which we are reading (must be a write)
1108 * @return True if this read established synchronization
1110 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1112 act->set_read_from(rf);
// Only an acquire read can synchronize with the heads of release sequences.
1113 if (rf != NULL && act->is_acquire()) {
1114 rel_heads_list_t release_heads;
1115 get_release_seq_heads(act, act, &release_heads);
1116 int num_heads = release_heads.size();
1117 for (unsigned int i = 0; i < release_heads.size(); i++)
1118 if (!act->synchronize_with(release_heads[i])) {
1119 set_bad_synchronization();
1122 return num_heads > 0;
1128 * @brief Check whether a model action is enabled.
1130 * Checks whether a lock or join operation would be successful (i.e., is the
1131 * lock already locked, or is the joined thread already complete). If not, put
1132 * the action in a waiter list.
1134 * @param curr is the ModelAction to check whether it is enabled.
1135 * @return a bool that indicates whether the action is enabled.
1137 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1138 if (curr->is_lock()) {
1139 std::mutex * lock = (std::mutex *)curr->get_location();
1140 struct std::mutex_state * state = lock->get_state();
1141 if (state->islocked) {
1142 //Stick the action in the appropriate waiting queue
1143 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1146 } else if (curr->get_type() == THREAD_JOIN) {
1147 Thread *blocking = (Thread *)curr->get_location();
1148 if (!blocking->is_complete()) {
// Queue this join on the target thread; THREAD_FINISH wakes it later.
1149 blocking->push_wait_list(curr);
1158 * Stores the ModelAction for the current thread action. Call this
1159 * immediately before switching from user- to system-context to pass
1160 * data between them.
1161 * @param act The ModelAction created by the user-thread action
1163 void ModelChecker::set_current_action(ModelAction *act) {
1164 priv->current_action = act;
1168 * This is the heart of the model checker routine. It performs model-checking
1169 * actions corresponding to a given "current action." Among other processes, it
1170 * calculates reads-from relationships, updates synchronization clock vectors,
1171 * forms a memory_order constraints graph, and handles replay/backtrack
1172 * execution when running permutations of previously-observed executions.
1174 * @param curr The current action to process
1175 * @return The next Thread that must be executed. May be NULL if ModelChecker
1176 * makes no choice (e.g., according to replay execution, combining RMW actions,
1179 Thread * ModelChecker::check_current_action(ModelAction *curr)
// An RMWC or RMW action is the completion half of a two-part
// read-modify-write; it was already added to lists by the read half
1182 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1184 if (!check_action_enabled(curr)) {
1185 /* Make the execution look like we chose to run this action
1186 * much later, when a lock/join can succeed */
1187 get_current_thread()->set_pending(curr);
1188 scheduler->sleep(get_current_thread());
1189 return get_next_thread(NULL);
1192 bool newly_explored = initialize_curr_action(&curr);
1194 wake_up_sleeping_actions(curr);
1196 /* Add the action to lists before any other model-checking tasks */
1197 if (!second_part_of_rmw)
1198 add_action_to_lists(curr);
1200 /* Build may_read_from set for newly-created actions */
1201 if (newly_explored && curr->is_read())
1202 build_reads_from_past(curr);
1204 /* Initialize work_queue with the "current action" work */
1205 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
// Drain the work queue; each item may push further work (release-seq
// re-checks, mo-graph edge re-checks). Stop early on assertion failure.
1206 while (!work_queue.empty() && !has_asserted()) {
1207 WorkQueueEntry work = work_queue.front();
1208 work_queue.pop_front();
1210 switch (work.type) {
1211 case WORK_CHECK_CURR_ACTION: {
1212 ModelAction *act = work.action;
1213 bool update = false; /* update this location's release seq's */
1214 bool update_all = false; /* update all release seq's */
// Dispatch to the per-category processors; each returns whether
// release sequences must be re-examined
1216 if (process_thread_action(curr))
1219 if (act->is_read() && process_read(act, second_part_of_rmw))
1222 if (act->is_write() && process_write(act))
1225 if (act->is_fence() && process_fence(act))
1228 if (act->is_mutex_op() && process_mutex(act))
1231 if (act->is_relseq_fixup())
1232 process_relseq_fixup(curr, &work_queue);
// NULL location means "re-check release sequences at every location"
1235 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1237 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1240 case WORK_CHECK_RELEASE_SEQ:
1241 resolve_release_sequences(work.location, &work_queue);
1243 case WORK_CHECK_MO_EDGES: {
1244 /** @todo Complete verification of work_queue */
1245 ModelAction *act = work.action;
1246 bool updated = false;
1248 if (act->is_read()) {
1249 const ModelAction *rf = act->get_reads_from();
1250 if (rf != NULL && r_modification_order(act, rf))
1253 if (act->is_write()) {
1254 if (w_modification_order(act))
// Commit speculative mo-graph edges added above
1257 mo_graph->commitChanges();
1260 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// After the queue drains: record backtracking points and pick the next thread
1269 check_curr_backtracking(curr);
1270 set_backtracking(curr);
1271 return get_next_thread(curr);
// Record curr as the next backtracking point if its Node (or its
// parent's) still has unexplored choices and curr is later in the
// trace than the currently-recorded backtracking point.
1274 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1275 Node *currnode = curr->get_node();
1276 Node *parnode = currnode->get_parent();
// Any non-empty choice set (backtrack, misc, reads-from, future value,
// promise, or release-seq break) means there is something left to explore
1278 if ((!parnode->backtrack_empty() ||
1279 !currnode->misc_empty() ||
1280 !currnode->read_from_empty() ||
1281 !currnode->future_value_empty() ||
1282 !currnode->promise_empty() ||
1283 !currnode->relseq_break_empty())
1284 && (!priv->next_backtrack ||
1285 *curr > *priv->next_backtrack)) {
1286 priv->next_backtrack = curr;
// @return true if any outstanding promise's expiration sequence number
// has already been passed by the execution
1290 bool ModelChecker::promises_expired() const
1292 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1293 Promise *promise = (*promises)[promise_index];
1294 if (promise->get_expiration()<priv->used_sequence_numbers) {
1302 * This is the strongest feasibility check available.
1303 * @return whether the current trace (partial or complete) must be a prefix of
1306 bool ModelChecker::isfeasibleprefix() const
// Feasible prefix = no unresolved (pending) release sequences and
// feasible by every other criterion
1308 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1312 * Returns whether the current completed trace is feasible, except for pending
1313 * release sequences.
1315 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1317 if (DBG_ENABLED() && promises->size() != 0)
// NOTE(review): "unrevolved" is a typo for "unresolved" in this debug
// string; fix as a code change, not here
1318 DEBUG("Infeasible: unrevolved promises\n");
// Feasible iff not otherwise infeasible and all promises are resolved
1320 return !is_infeasible() && promises->size() == 0;
1324 * Check if the current partial trace is infeasible. Does not check any
1325 * end-of-execution flags, which might rule out the execution. Thus, this is
1326 * useful only for ruling an execution as infeasible.
1327 * @return whether the current partial trace is infeasible.
1329 bool ModelChecker::is_infeasible() const
1331 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1332 DEBUG("Infeasible: RMW violation\n");
// RMW violation (two RMWs reading the same store) plus every check in
// is_infeasible_ignoreRMW()
1334 return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1338 * Check If the current partial trace is infeasible, while ignoring
1339 * infeasibility related to 2 RMW's reading from the same store. It does not
1340 * check end-of-execution feasibility.
1341 * @see ModelChecker::is_infeasible
1342 * @return whether the current partial trace is infeasible, ignoring multiple
1343 * RMWs reading from the same store.
1345 bool ModelChecker::is_infeasible_ignoreRMW() const
// Debug block mirrors the return expression below, naming which
// individual condition made the trace infeasible
1347 if (DBG_ENABLED()) {
1348 if (mo_graph->checkForCycles())
1349 DEBUG("Infeasible: modification order cycles\n");
1350 if (priv->failed_promise)
1351 DEBUG("Infeasible: failed promise\n");
1352 if (priv->too_many_reads)
1353 DEBUG("Infeasible: too many reads\n");
1354 if (priv->bad_synchronization)
1355 DEBUG("Infeasible: bad synchronization ordering\n");
1356 if (promises_expired())
1357 DEBUG("Infeasible: promises expired\n");
1359 return mo_graph->checkForCycles() || priv->failed_promise ||
1360 priv->too_many_reads || priv->bad_synchronization ||
1364 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1365 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
// The read half of this RMW is the last action of the same thread
1366 ModelAction *lastread = get_last_action(act->get_tid());
1367 lastread->process_rmw(act);
1368 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
// Atomicity edge: the RMW must be mo-ordered immediately after the
// store it read from
1369 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1370 mo_graph->commitChanges();
1376 * Checks whether a thread has read from the same write for too many times
1377 * without seeing the effects of a later write.
1380 * 1) there must a different write that we could read from that would satisfy the modification order,
1381 * 2) we must have read from the same value in excess of maxreads times, and
1382 * 3) that other write must have been in the reads_from set for maxreads times.
1384 * If so, we decide that the execution is no longer feasible.
1386 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
// maxreads == 0 disables the liveness/recency check entirely
1387 if (params.maxreads != 0) {
1389 if (curr->get_node()->get_read_from_size() <= 1)
1391 //Must make sure that execution is currently feasible... We could
1392 //accidentally clear by rolling back
1393 if (is_infeasible())
1395 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1396 int tid = id_to_int(curr->get_tid());
1399 if ((int)thrd_lists->size() <= tid)
1401 action_list_t *list = &(*thrd_lists)[tid];
1403 action_list_t::reverse_iterator rit = list->rbegin();
1404 /* Skip past curr */
1405 for (; (*rit) != curr; rit++)
1407 /* go past curr now */
// Remember the position just past curr so the verification pass below
// can re-walk the same maxreads-length window
1410 action_list_t::reverse_iterator ritcopy = rit;
1411 //See if we have enough reads from the same value
1413 for (; count < params.maxreads; rit++,count++) {
1414 if (rit==list->rend())
1416 ModelAction *act = *rit;
1417 if (!act->is_read())
// Window must consist of reads all reading from the same store rf
1420 if (act->get_reads_from() != rf)
1422 if (act->get_node()->get_read_from_size() <= 1)
// For each alternative store curr could have read from...
1425 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1427 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1429 //Need a different write
1433 /* Test to see whether this is a feasible write to read from*/
// Speculatively add the mo edges, test feasibility, then roll back
1434 mo_graph->startChanges();
1435 r_modification_order(curr, write);
1436 bool feasiblereadfrom = !is_infeasible();
1437 mo_graph->rollbackChanges();
1439 if (!feasiblereadfrom)
1443 bool feasiblewrite = true;
1444 //now we need to see if this write works for everyone
1446 for (int loop = count; loop>0; loop--,rit++) {
1447 ModelAction *act=*rit;
1448 bool foundvalue = false;
1449 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1450 if (act->get_node()->get_read_from_at(j)==write) {
1456 feasiblewrite = false;
// A feasible ignored alternative existed for the whole window:
// rule this execution out as "too many reads"
1460 if (feasiblewrite) {
1461 priv->too_many_reads = true;
1469 * Updates the mo_graph with the constraints imposed from the current
1472 * Basic idea is the following: Go through each other thread and find
1473 the latest action that happened before our read. Two cases:
1475 * (1) The action is a write => that write must either occur before
1476 * the write we read from or be the write we read from.
1478 * (2) The action is a read => the write that that action read from
1479 * must occur before the write we read from or be the same write.
1481 * @param curr The current action. Must be a read.
1482 * @param rf The action that curr reads from. Must be a write.
1483 * @return True if modification order edges were added; false otherwise
1485 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1487 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1490 ASSERT(curr->is_read());
1492 /* Last SC fence in the current thread */
1493 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1495 /* Iterate over all threads */
1496 for (i = 0; i < thrd_lists->size(); i++) {
1497 /* Last SC fence in thread i */
1498 ModelAction *last_sc_fence_thread_local = NULL;
// Only meaningful for other threads; curr's own thread is handled by
// the happens-before logic below
1499 if (int_to_id((int)i) != curr->get_tid())
1500 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1502 /* Last SC fence in thread i, before last SC fence in current thread */
1503 ModelAction *last_sc_fence_thread_before = NULL;
1504 if (last_sc_fence_local)
1505 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1507 /* Iterate over actions in thread, starting from most recent */
1508 action_list_t *list = &(*thrd_lists)[i];
1509 action_list_t::reverse_iterator rit;
1510 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1511 ModelAction *act = *rit;
// SC-fence-induced ordering: writes separated from curr by the
// appropriate seq_cst fences must be mo-before rf
1513 if (act->is_write() && act != rf && act != curr) {
1514 /* C++, Section 29.3 statement 5 */
1515 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1516 *act < *last_sc_fence_thread_local) {
1517 mo_graph->addEdge(act, rf);
1521 /* C++, Section 29.3 statement 4 */
1522 else if (act->is_seqcst() && last_sc_fence_local &&
1523 *act < *last_sc_fence_local) {
1524 mo_graph->addEdge(act, rf);
1528 /* C++, Section 29.3 statement 6 */
1529 else if (last_sc_fence_thread_before &&
1530 *act < *last_sc_fence_thread_before) {
1531 mo_graph->addEdge(act, rf);
1538 * Include at most one act per-thread that "happens
1539 * before" curr. Don't consider reflexively.
1541 if (act->happens_before(curr) && act != curr) {
1542 if (act->is_write()) {
// hb-prior write must be mo-before the store we read from
1544 mo_graph->addEdge(act, rf);
1548 const ModelAction *prevreadfrom = act->get_reads_from();
1549 //if the previous read is unresolved, keep going...
1550 if (prevreadfrom == NULL)
// hb-prior read: the store it read must be mo-before rf
1553 if (rf != prevreadfrom) {
1554 mo_graph->addEdge(prevreadfrom, rf);
1566 /** This method fixes up the modification order when we resolve a
1567 * promise. The basic problem is that actions that occur after the
1568 * read curr could not properly add items to the modification order
1571 * So for each thread, we find the earliest item that happens after
1572 * the read curr. This is the item we have to fix up with additional
1573 * constraints. If that action is write, we add a MO edge between
1574 * the Action rf and that action. If the action is a read, we add a
1575 * MO edge between the Action rf, and whatever the read accessed.
1577 * @param curr is the read ModelAction that we are fixing up MO edges for.
1578 * @param rf is the write ModelAction that curr reads from.
1581 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1583 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1585 ASSERT(curr->is_read());
1587 /* Iterate over all threads */
1588 for (i = 0; i < thrd_lists->size(); i++) {
1589 /* Iterate over actions in thread, starting from most recent */
1590 action_list_t *list = &(*thrd_lists)[i];
1591 action_list_t::reverse_iterator rit;
1592 ModelAction *lastact = NULL;
1594 /* Find last action that happens after curr that is either not curr or a rmw */
// Walking in reverse, the last match is the EARLIEST such action in
// program order — the one that needs the fix-up edge
1595 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1596 ModelAction *act = *rit;
1597 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1603 /* Include at most one act per-thread that "happens before" curr */
1604 if (lastact != NULL) {
1605 if (lastact==curr) {
1606 //Case 1: The resolved read is a RMW, and we need to make sure
1607 //that the write portion of the RMW mod order after rf
1609 mo_graph->addEdge(rf, lastact);
1610 } else if (lastact->is_read()) {
1611 //Case 2: The resolved read is a normal read and the next
1612 //operation is a read, and we need to make sure the value read
1613 //is mod ordered after rf
1615 const ModelAction *postreadfrom = lastact->get_reads_from();
1616 if (postreadfrom != NULL&&rf != postreadfrom)
1617 mo_graph->addEdge(rf, postreadfrom);
1619 //Case 3: The resolved read is a normal read and the next
1620 //operation is a write, and we need to make sure that the
1621 //write is mod ordered after rf
1623 mo_graph->addEdge(rf, lastact);
1631 * Updates the mo_graph with the constraints imposed from the current write.
1633 * Basic idea is the following: Go through each other thread and find
1634 * the latest action that happened before our write. Two cases:
1636 * (1) The action is a write => that write must occur before
1639 * (2) The action is a read => the write that that action read from
1640 * must occur before the current write.
1642 * This method also handles two other issues:
1644 * (I) Sequential Consistency: Making sure that if the current write is
1645 * seq_cst, that it occurs after the previous seq_cst write.
1647 * (II) Sending the write back to non-synchronizing reads.
1649 * @param curr The current action. Must be a write.
1650 * @return True if modification order edges were added; false otherwise
1652 bool ModelChecker::w_modification_order(ModelAction *curr)
1654 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1657 ASSERT(curr->is_write());
1659 if (curr->is_seqcst()) {
1660 /* We have to at least see the last sequentially consistent write,
1661 so we are initialized. */
1662 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1663 if (last_seq_cst != NULL) {
// SC total order: curr follows the previous seq_cst write to this object
1664 mo_graph->addEdge(last_seq_cst, curr);
1669 /* Last SC fence in the current thread */
1670 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1672 /* Iterate over all threads */
1673 for (i = 0; i < thrd_lists->size(); i++) {
1674 /* Last SC fence in thread i, before last SC fence in current thread */
1675 ModelAction *last_sc_fence_thread_before = NULL;
1676 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1677 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1679 /* Iterate over actions in thread, starting from most recent */
1680 action_list_t *list = &(*thrd_lists)[i];
1681 action_list_t::reverse_iterator rit;
1682 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1683 ModelAction *act = *rit;
1686 * 1) If RMW and it actually read from something, then we
1687 * already have all relevant edges, so just skip to next
1690 * 2) If RMW and it didn't read from anything, we should
1691 * whatever edge we can get to speed up convergence.
1693 * 3) If normal write, we need to look at earlier actions, so
1694 * continue processing list.
1696 if (curr->is_rmw()) {
1697 if (curr->get_reads_from()!=NULL)
1705 /* C++, Section 29.3 statement 7 */
1706 if (last_sc_fence_thread_before && act->is_write() &&
1707 *act < *last_sc_fence_thread_before) {
1708 mo_graph->addEdge(act, curr);
1714 * Include at most one act per-thread that "happens
1717 if (act->happens_before(curr)) {
1719 * Note: if act is RMW, just add edge:
1721 * The following edge should be handled elsewhere:
1722 * readfrom(act) --mo--> act
1724 if (act->is_write())
1725 mo_graph->addEdge(act, curr);
1726 else if (act->is_read()) {
1727 //if previous read accessed a null, just keep going
1728 if (act->get_reads_from() == NULL)
1730 mo_graph->addEdge(act->get_reads_from(), curr);
1734 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1735 !act->same_thread(curr)) {
1736 /* We have an action that:
1737 (1) did not happen before us
1738 (2) is a read and we are a write
1739 (3) cannot synchronize with us
1740 (4) is in a different thread
1742 that read could potentially read from our write. Note that
1743 these checks are overly conservative at this point, we'll
1744 do more checks before actually removing the
// Record curr as a possible future value for this earlier read,
// subject to the thin-air constraint check
1748 if (thin_air_constraint_may_allow(curr, act)) {
1749 if (!is_infeasible() ||
1750 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1751 struct PendingFutureValue pfv = {curr,act};
1752 futurevalues->push_back(pfv);
1762 /** Arbitrary reads from the future are not allowed. Section 29.3
1763 * part 9 places some constraints. This method checks one result of the
1764 * constraint. Others require compiler support. */
1765 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
// Only a RMW-writer / RMW-reader pair can form the forbidden cycle
1766 if (!writer->is_rmw())
1769 if (!reader->is_rmw())
// Walk the reads-from chain backwards from the writer; if it passes
// through the reader (or something hb-after it in the same thread),
// reading from this writer would create a thin-air cycle
1772 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1773 if (search == reader)
1775 if (search->get_tid() == reader->get_tid() &&
1776 search->happens_before(reader))
1784 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1785 * some constraints. This method checks one the following constraint (others
1786 * require compiler support):
1788 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1790 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1792 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1794 /* Iterate over all threads */
1795 for (i = 0; i < thrd_lists->size(); i++) {
1796 const ModelAction *write_after_read = NULL;
1798 /* Iterate over actions in thread, starting from most recent */
1799 action_list_t *list = &(*thrd_lists)[i];
1800 action_list_t::reverse_iterator rit;
// Find the earliest write (or write read by a read) in this thread
// that happens after the reader
1801 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1802 ModelAction *act = *rit;
1804 if (!reader->happens_before(act))
1806 else if (act->is_write())
1807 write_after_read = act;
1808 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1809 write_after_read = act->get_reads_from();
// X --hb-> Y --mo-> Z forbids X reading from Z: reject if such a
// write is mo-before the candidate writer
1813 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1820 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1821 * The ModelAction under consideration is expected to be taking part in
1822 * release/acquire synchronization as an object of the "reads from" relation.
1823 * Note that this can only provide release sequence support for RMW chains
1824 * which do not read from the future, as those actions cannot be traced until
1825 * their "promise" is fulfilled. Similarly, we may not even establish the
1826 * presence of a release sequence with certainty, as some modification order
1827 * constraints may be decided further in the future. Thus, this function
1828 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1829 * and a boolean representing certainty.
1831 * @param rf The action that might be part of a release sequence. Must be a
1833 * @param release_heads A pass-by-reference style return parameter. After
1834 * execution of this function, release_heads will contain the heads of all the
1835 * relevant release sequences, if any exists with certainty
1836 * @param pending A pass-by-reference style return parameter which is only used
1837 * when returning false (i.e., uncertain). Returns most information regarding
1838 * an uncertain release sequence, including any write operations that might
1839 * break the sequence.
1840 * @return true, if the ModelChecker is certain that release_heads is complete;
1843 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1844 rel_heads_list_t *release_heads,
1845 struct release_seq *pending) const
1847 /* Only check for release sequences if there are no cycles */
1848 if (mo_graph->checkForCycles())
1852 ASSERT(rf->is_write());
// Walk backwards through the RMW chain; every release store (or its
// preceding fence-release) along the chain heads a release sequence
1854 if (rf->is_release())
1855 release_heads->push_back(rf);
1856 else if (rf->get_last_fence_release())
1857 release_heads->push_back(rf->get_last_fence_release());
1859 break; /* End of RMW chain */
1861 /** @todo Need to be smarter here... In the linux lock
1862 * example, this will run to the beginning of the program for
1864 /** @todo The way to be smarter here is to keep going until 1
1865 * thread has a release preceded by an acquire and you've seen
1868 /* acq_rel RMW is a sufficient stopping condition */
1869 if (rf->is_acquire() && rf->is_release())
1870 return true; /* complete */
1872 rf = rf->get_reads_from();
1875 /* read from future: need to settle this later */
1877 return false; /* incomplete */
1880 if (rf->is_release())
1881 return true; /* complete */
1883 /* else relaxed write
1884 * - check for fence-release in the same thread (29.8, stmt. 3)
1885 * - check modification order for contiguous subsequence
1886 * -> rf must be same thread as release */
1888 const ModelAction *fence_release = rf->get_last_fence_release();
1889 /* Synchronize with a fence-release unconditionally; we don't need to
1890 * find any more "contiguous subsequence..." for it */
1892 release_heads->push_back(fence_release);
1894 int tid = id_to_int(rf->get_tid());
1895 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1896 action_list_t *list = &(*thrd_lists)[tid];
1897 action_list_t::const_reverse_iterator rit;
1899 /* Find rf in the thread list */
1900 rit = std::find(list->rbegin(), list->rend(), rf);
1901 ASSERT(rit != list->rend());
1903 /* Find the last {write,fence}-release */
1904 for (; rit != list->rend(); rit++) {
// Stop searching once we pass the fence-release; it dominates
1905 if (fence_release && *(*rit) < *fence_release)
1907 if ((*rit)->is_release())
1910 if (rit == list->rend()) {
1911 /* No write-release in this thread */
1912 return true; /* complete */
1913 } else if (fence_release && *(*rit) < *fence_release) {
1914 /* The fence-release is more recent (and so, "stronger") than
1915 * the most recent write-release */
1916 return true; /* complete */
1917 } /* else, need to establish contiguous release sequence */
1918 ModelAction *release = *rit;
1920 ASSERT(rf->same_thread(release));
1922 pending->writes.clear();
// For every other thread, decide whether any of its writes could break
// the contiguous subsequence between 'release' and 'rf'
1924 bool certain = true;
1925 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1926 if (id_to_int(rf->get_tid()) == (int)i)
1928 list = &(*thrd_lists)[i];
1930 /* Can we ensure no future writes from this thread may break
1931 * the release seq? */
1932 bool future_ordered = false;
1934 ModelAction *last = get_last_action(int_to_id(i));
1935 Thread *th = get_thread(int_to_id(i));
1936 if ((last && rf->happens_before(last)) ||
1939 future_ordered = true;
1941 ASSERT(!th->is_model_thread() || future_ordered);
1943 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1944 const ModelAction *act = *rit;
1945 /* Reach synchronization -> this thread is complete */
1946 if (act->happens_before(release))
1948 if (rf->happens_before(act)) {
1949 future_ordered = true;
1953 /* Only non-RMW writes can break release sequences */
1954 if (!act->is_write() || act->is_rmw())
1957 /* Check modification order */
1958 if (mo_graph->checkReachable(rf, act)) {
1959 /* rf --mo--> act */
1960 future_ordered = true;
1963 if (mo_graph->checkReachable(act, release))
1964 /* act --mo--> release */
1966 if (mo_graph->checkReachable(release, act) &&
1967 mo_graph->checkReachable(act, rf)) {
1968 /* release --mo-> act --mo--> rf */
1969 return true; /* complete */
1971 /* act may break release sequence */
1972 pending->writes.push_back(act);
1975 if (!future_ordered)
1976 certain = false; /* This thread is uncertain */
// Certain: the release heads are final; clear the pending record
1980 release_heads->push_back(release);
1981 pending->writes.clear();
1983 pending->release = release;
1990 * An interface for getting the release sequence head(s) with which a
1991 * given ModelAction must synchronize. This function only returns a non-empty
1992 * result when it can locate a release sequence head with certainty. Otherwise,
1993 * it may mark the internal state of the ModelChecker so that it will handle
1994 * the release sequence at a later time, causing @a acquire to update its
1995 * synchronization at some later point in execution.
1997 * @param acquire The 'acquire' action that may synchronize with a release
1999 * @param read The read action that may read from a release sequence; this may
2000 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2001 * when 'acquire' is a fence-acquire)
2002 * @param release_heads A pass-by-reference return parameter. Will be filled
2003 * with the head(s) of the release sequence(s), if they exists with certainty.
2004 * @see ModelChecker::release_seq_heads
2006 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2007 ModelAction *read, rel_heads_list_t *release_heads)
2009 const ModelAction *rf = read->get_reads_from();
// Allocate a pending record up front; it is only kept if the release
// sequence cannot yet be established with certainty
2010 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2011 sequence->acquire = acquire;
2012 sequence->read = read;
2014 if (!release_seq_heads(rf, release_heads, sequence)) {
2015 /* add act to 'lazy checking' list */
2016 pending_rel_seqs->push_back(sequence);
// Certain result: the pending record is not needed
2018 snapshot_free(sequence);
2023 * Attempt to resolve all stashed operations that might synchronize with a
2024 * release sequence for a given location. This implements the "lazy" portion of
2025 * determining whether or not a release sequence was contiguous, since not all
2026 * modification order information is present at the time an action occurs.
2028 * @param location The location/object that should be checked for release
2029 * sequence resolutions. A NULL value means to check all locations.
2030 * @param work_queue The work queue to which to add work items as they are
2032 * @return True if any updates occurred (new synchronization, new mo_graph
2035 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2037 bool updated = false;
2038 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2039 while (it != pending_rel_seqs->end()) {
2040 struct release_seq *pending = *it;
2041 ModelAction *acquire = pending->acquire;
2042 const ModelAction *read = pending->read;
2044 /* Only resolve sequences on the given location, if provided */
2045 if (location && read->get_location() != location) {
2050 const ModelAction *rf = read->get_reads_from();
2051 rel_heads_list_t release_heads;
2053 complete = release_seq_heads(rf, &release_heads, pending);
// Synchronize the acquire with each newly-certain release head
2054 for (unsigned int i = 0; i < release_heads.size(); i++) {
2055 if (!acquire->has_synchronized_with(release_heads[i])) {
2056 if (acquire->synchronize_with(release_heads[i]))
// synchronize_with failed => ordering contradiction
2059 set_bad_synchronization();
2064 /* Re-check all pending release sequences */
2065 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2066 /* Re-check read-acquire for mo_graph edges */
2067 if (acquire->is_read())
2068 work_queue->push_back(MOEdgeWorkEntry(acquire));
2070 /* propagate synchronization to later actions */
2071 action_list_t::reverse_iterator rit = action_trace->rbegin();
2072 for (; (*rit) != acquire; rit++) {
2073 ModelAction *propagate = *rit;
2074 if (acquire->happens_before(propagate)) {
2075 propagate->synchronize_with(acquire);
2076 /* Re-check 'propagate' for mo_graph edges */
2077 work_queue->push_back(MOEdgeWorkEntry(propagate));
// Completed sequence: remove from the pending list and free
2082 it = pending_rel_seqs->erase(it);
2083 snapshot_free(pending);
2089 // If we resolved promises or data races, see if we have realized a data race.
2096 * Performs various bookkeeping operations for the current ModelAction. For
2097 * instance, adds action to the per-object, per-thread action vector and to the
2098 * action trace list of all thread actions.
2100 * @param act is the ModelAction to add.
2102 void ModelChecker::add_action_to_lists(ModelAction *act)
2104 int tid = id_to_int(act->get_tid());
// First access to a location gets a synthetic "uninitialized" write
// inserted ahead of it
2105 ModelAction *uninit = NULL;
2107 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2108 if (list->empty()) {
2109 uninit = new_uninitialized_action(act->get_location());
2110 uninit_id = id_to_int(uninit->get_tid());
2111 list->push_back(uninit);
2113 list->push_back(act);
2115 action_trace->push_back(act);
// The uninit action goes at the very front of the global trace
2117 action_trace->push_front(uninit);
2119 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2120 if (tid >= (int)vec->size())
2121 vec->resize(priv->next_thread_id);
2122 (*vec)[tid].push_back(act);
2124 (*vec)[uninit_id].push_front(uninit);
2126 if ((int)thrd_last_action->size() <= tid)
2127 thrd_last_action->resize(get_num_threads());
2128 (*thrd_last_action)[tid] = act;
2130 (*thrd_last_action)[uninit_id] = uninit;
2132 if (act->is_fence() && act->is_release()) {
2133 if ((int)thrd_last_fence_release->size() <= tid)
2134 thrd_last_fence_release->resize(get_num_threads());
2135 (*thrd_last_fence_release)[tid] = act;
2138 if (act->is_wait()) {
// A wait action also records itself under its mutex's location (the
// mutex address is stored in the action's value field)
2139 void *mutex_loc=(void *) act->get_value();
2140 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2142 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2143 if (tid >= (int)vec->size())
2144 vec->resize(priv->next_thread_id);
2145 (*vec)[tid].push_back(act);
2150 * @brief Get the last action performed by a particular Thread
2151 * @param tid The thread ID of the Thread in question
2152 * @return The last action in the thread
2154 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2156 int threadid = id_to_int(tid);
// Out-of-range tid means the thread has recorded no actions yet
2157 if (threadid < (int)thrd_last_action->size())
2158 return (*thrd_last_action)[id_to_int(tid)];
2164 * @brief Get the last fence release performed by a particular Thread
2165 * @param tid The thread ID of the Thread in question
2166 * @return The last fence release in the thread, if one exists; NULL otherwise
2168 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2170 int threadid = id_to_int(tid);
// Out-of-range tid means the thread has issued no fence-release yet
2171 if (threadid < (int)thrd_last_fence_release->size())
2172 return (*thrd_last_fence_release)[id_to_int(tid)];
2178 * Gets the last memory_order_seq_cst write (in the total global sequence)
2179 * performed on a particular object (i.e., memory location), not including the
2181 * @param curr The current ModelAction; also denotes the object location to
2183 * @return The last seq_cst write
2185 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2187 void *location = curr->get_location();
2188 action_list_t *list = get_safe_ptr_action(obj_map, location);
2189 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
// Reverse scan finds the most recent matching write first
2190 action_list_t::reverse_iterator rit;
2191 for (rit = list->rbegin(); rit != list->rend(); rit++)
2192 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2198 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2199 * performed in a particular thread, prior to a particular fence.
2200 * @param tid The ID of the thread to check
2201 * @param before_fence The fence from which to begin the search; if NULL, then
2202 * search for the most recent fence in the thread.
2203 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2205 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2207 /* All fences should have NULL location */
2208 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2209 action_list_t::reverse_iterator rit = list->rbegin();
// First advance the iterator past 'before_fence', if one was given
2212 for (; rit != list->rend(); rit++)
2213 if (*rit == before_fence)
2216 ASSERT(*rit == before_fence);
// Then find the most recent seq_cst fence in the requested thread
2220 for (; rit != list->rend(); rit++)
2221 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2227 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2228 * location). This function identifies the mutex according to the current
2229 * action, which is presumed to perform on the same mutex.
2230 * @param curr The current ModelAction; also denotes the object location to
2232 * @return The last unlock operation
2234 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2236 void *location = curr->get_location();
/* All actions ever performed on this mutex, in execution order */
2237 action_list_t *list = get_safe_ptr_action(obj_map, location);
2238 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2239 action_list_t::reverse_iterator rit;
/* Reverse scan: first match is the most recent release of the mutex. Wait
 * operations are included — presumably because a wait also releases the
 * mutex; confirm against the ModelAction semantics. Return path is not
 * visible in this truncated excerpt. */
2240 for (rit = list->rbegin(); rit != list->rend(); rit++)
2241 if ((*rit)->is_unlock() || (*rit)->is_wait())
/**
 * Get the action that serves as "parent" for thread tid: its last action if
 * one exists, otherwise falling back to the THREAD_CREATE action that spawned
 * it. NOTE(review): the NULL-check guarding the fallback is not visible in
 * this truncated listing — presumably `if (!parent)`.
 */
2246 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2248 ModelAction *parent = get_last_action(tid);
2250 parent = get_thread(tid)->get_creation();
2255 * Returns the clock vector for a given thread.
2256 * @param tid The thread whose clock vector we want
2257 * @return Desired clock vector
2259 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
/* Delegate to the thread's parent action (last action or creation action) */
2261 return get_parent_action(tid)->get_cv();
2265 * Resolve a set of Promises with a current write. The set is provided in the
2266 * Node corresponding to @a write.
2267 * @param write The ModelAction that is fulfilling Promises
2268 * @return True if promises were resolved; false otherwise
2270 bool ModelChecker::resolve_promises(ModelAction *write)
2272 bool resolved = false;
/* Threads whose reads get resolved here; re-checked against mo at the end */
2273 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
/* Dual-index loop: `i` indexes the Node's promise bits, `promise_index`
 * indexes the live promises vector (erasing shifts later entries down, so
 * only `i` advances unconditionally). NOTE(review): the else-branch that
 * advances promise_index for unresolved promises is not visible in this
 * truncated excerpt. */
2275 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2276 Promise *promise = (*promises)[promise_index];
2277 if (write->get_node()->get_promise(i)) {
2278 ModelAction *read = promise->get_action();
/* RMW readers additionally need an RMW edge in the modification order */
2279 if (read->is_rmw()) {
2280 mo_graph->addRMWEdge(write, read);
/* Bind the reader to this write's value */
2282 read_from(read, write);
2283 //First fix up the modification order for actions that happened
2285 r_modification_order(read, write);
2286 //Next fix up the modification order for actions that happened
2288 post_r_modification_order(read, write);
2289 //Make sure the promise's value matches the write's value
2290 ASSERT(promise->get_value() == write->get_value());
/* Promise fulfilled: drop it and remember the reader's thread */
2293 promises->erase(promises->begin() + promise_index);
2294 threads_to_check.push_back(read->get_tid());
2301 //Check whether reading these writes has made threads unable to
2304 for(unsigned int i=0;i<threads_to_check.size();i++)
2305 mo_check_promises(threads_to_check[i], write);
2311 * Compute the set of promises that could potentially be satisfied by this
2312 * action. Note that the set computation actually appears in the Node, not in
2314 * @param curr The ModelAction that may satisfy promises
2316 void ModelChecker::compute_promises(ModelAction *curr)
2318 for (unsigned int i = 0; i < promises->size(); i++) {
2319 Promise *promise = (*promises)[i];
2320 const ModelAction *act = promise->get_action();
/* A promise is satisfiable by curr only when the promised read is not
 * already ordered with curr (no happens-before, no possible
 * synchronization, different thread) yet targets the same location with
 * the same value. */
2321 if (!act->happens_before(curr) &&
2323 !act->could_synchronize_with(curr) &&
2324 !act->same_thread(curr) &&
2325 act->get_location() == curr->get_location() &&
2326 promise->get_value() == curr->get_value()) {
/* Record satisfiability in curr's Node; flag whether the reader is RMW */
2327 curr->get_node()->set_promise(i, act->is_rmw());
2332 /** Checks promises in response to change in ClockVector Threads. */
2333 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2334 // For each outstanding promise, detect threads that have *newly*
2334 // synchronized past the promised read (not synchronized under old_cv,
2334 // but synchronized under merge_cv) — such threads can no longer
2334 // fulfill the promise.
2335 for (unsigned int i = 0; i < promises->size(); i++) {
2336 Promise *promise = (*promises)[i];
2337 const ModelAction *act = promise->get_action();
2338 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2339 merge_cv->synchronized_since(act)) {
2340 if (promise->increment_threads(tid)) {
2341 //Promise has failed: no thread can fulfill it anymore
2342 priv->failed_promise = true;
/** @brief Re-check all promises after a thread is disabled; any promise that
 *  can no longer be fulfilled marks the execution as a failed promise. */
2349 void ModelChecker::check_promises_thread_disabled() {
2350 for (unsigned int i = 0; i < promises->size(); i++) {
2351 Promise *promise = (*promises)[i];
2352 if (promise->check_promise()) {
2353 priv->failed_promise = true;
2359 /** Checks promises in response to addition to modification order for threads.
2361 * pthread is the thread that performed the read that created the promise
2363 * pread is the read that created the promise
2365 * pwrite is either the first write to same location as pread by
2366 * pthread that is sequenced after pread or the value read by the
2367 * first read to the same location as pread by pthread that is
2368 * sequenced after pread..
2370 * 1. If tid=pthread, then we check what other threads are reachable
2371 * through the modification order starting with pwrite. Those threads cannot
2372 * perform a write that will resolve the promise due to modification
2373 * order constraints.
2375 * 2. If the tid is not pthread, we check whether pwrite can reach the
2376 * action write through the modification order. If so, that thread
2377 * cannot perform a future write that will resolve the promise due to
2378 * modification order constraints.
2380 * @param tid The thread that either read from the model action
2381 * write, or actually did the model action write.
2383 * @param write The ModelAction representing the relevant write.
/* NOTE(review): truncated listing — the `continue` statements and brace
 * structure between the cases below are not visible in this excerpt. */
2386 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2387 void * location = write->get_location();
2388 for (unsigned int i = 0; i < promises->size(); i++) {
2389 Promise *promise = (*promises)[i];
2390 const ModelAction *act = promise->get_action();
2392 //Is this promise on the same location?
2393 if ( act->get_location() != location )
2396 //same thread as the promise
2397 if ( act->get_tid()==tid ) {
2399 //do we have a pwrite for the promise, if not, set it
2400 if (promise->get_write() == NULL ) {
2401 promise->set_write(write);
2402 //The pwrite cannot happen before the promise
2403 if (write->happens_before(act) && (write != act)) {
2404 priv->failed_promise = true;
/* Case 1: threads mo-reachable from pwrite can no longer fulfill */
2408 if (mo_graph->checkPromise(write, promise)) {
2409 priv->failed_promise = true;
2414 //Don't do any lookups twice for the same thread
2415 if (promise->has_sync_thread(tid))
/* Case 2: pwrite reaches this write via mo, so thread tid is ruled out */
2418 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2419 if (promise->increment_threads(tid)) {
2420 priv->failed_promise = true;
2428 * Compute the set of writes that may break the current pending release
2429 * sequence. This information is extracted from previous release sequence
2432 * @param curr The current ModelAction. Must be a release sequence fixup
2435 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
/* Nothing to fix up if there are no pending release sequences */
2437 if (pending_rel_seqs->empty())
/* Only the most recent pending release sequence is considered here */
2440 struct release_seq *pending = pending_rel_seqs->back();
/* Each write that might break the sequence becomes a backtracking choice
 * recorded on curr's Node */
2441 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2442 const ModelAction *write = pending->writes[i];
2443 curr->get_node()->add_relseq_break(write);
2446 /* NULL means don't break the sequence; just synchronize */
2447 curr->get_node()->add_relseq_break(NULL);
2451 * Build up an initial set of all past writes that this 'read' action may read
2452 * from. This set is determined by the clock vector's "happens before"
2454 * @param curr is the current ModelAction that we are exploring; it must be a
2457 void ModelChecker::build_reads_from_past(ModelAction *curr)
/* Per-thread action lists for the location being read */
2459 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2461 ASSERT(curr->is_read());
2463 ModelAction *last_sc_write = NULL;
/* A seq_cst read must not read past the most recent seq_cst write */
2465 if (curr->is_seqcst())
2466 last_sc_write = get_last_seq_cst_write(curr);
2468 /* Iterate over all threads */
/* NOTE(review): the declaration of loop index `i` is not visible in this
 * truncated excerpt (presumably unsigned). */
2469 for (i = 0; i < thrd_lists->size(); i++) {
2470 /* Iterate over actions in thread, starting from most recent */
2471 action_list_t *list = &(*thrd_lists)[i];
2472 action_list_t::reverse_iterator rit;
2473 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2474 ModelAction *act = *rit;
2476 /* Only consider 'write' actions */
2477 if (!act->is_write() || act == curr)
2480 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2481 bool allow_read = true;
/* Disallow: (a) seq_cst reads seeing stale seq_cst-ordered writes,
 * (b) sleeping threads reading writes they could not observe.
 * NOTE(review): the `allow_read = false` assignments and the guard that
 * consumes allow_read are not visible in this truncated excerpt. */
2483 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2485 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2489 DEBUG("Adding action to may_read_from:\n");
2490 if (DBG_ENABLED()) {
/* Record the candidate write on the read's Node */
2494 curr->get_node()->add_read_from(act);
2497 /* Include at most one act per-thread that "happens before" curr */
2498 if (act->happens_before(curr))
2503 if (DBG_ENABLED()) {
2504 model_print("Reached read action:\n");
2506 model_print("Printing may_read_from\n");
2507 curr->get_node()->print_may_read_from();
2508 model_print("End printing may_read_from\n");
/**
 * @brief Check whether a sleeping thread's read may observe a given write.
 * Follows the write's RMW reads-from chain backwards looking for a release
 * write made while the reader was on the sleep set.
 * NOTE(review): truncated listing — the enclosing loop header and the
 * true/false return statements are not visible in this excerpt.
 */
2512 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2514 /* UNINIT actions don't have a Node, and they never sleep */
2515 if (write->is_uninitialized())
/* Enabled-status of the reader at the time just before this write */
2517 Node *prevnode=write->get_node()->get_parent();
2519 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2520 if (write->is_release()&&thread_sleep)
/* Non-RMW writes terminate the chain */
2522 if (!write->is_rmw()) {
2525 if (write->get_reads_from()==NULL)
/* Step back along the RMW chain and repeat */
2527 write=write->get_reads_from();
2532 * @brief Create a new action representing an uninitialized atomic
2533 * @param location The memory location of the atomic object
2534 * @return A pointer to a new ModelAction
2536 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
/* Allocate from the snapshotting heap, then placement-construct, so the
 * action participates in snapshot/rollback like other model state */
2538 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2539 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
/* Uninit action gets its own clock vector (no parent to inherit from) */
2540 act->create_cv(NULL);
/**
 * @brief Print a banner-delimited dump of an action list plus a hash of it.
 * @param list The actions to print, in order
 * @param exec_num Execution number to label the dump with; -1 (default)
 *        presumably suppresses the label — confirm against the hidden guard.
 */
2544 static void print_list(action_list_t *list, int exec_num = -1)
2546 action_list_t::iterator it;
2548 model_print("---------------------------------------------------------------------\n");
2550 model_print("Execution %d:\n", exec_num);
/* Order-sensitive rolling hash over the actions, printed as a trace digest */
2552 unsigned int hash=0;
2554 for (it = list->begin(); it != list->end(); it++) {
2556 hash=hash^(hash<<3)^((*it)->hash());
2558 model_print("HASH %u\n", hash);
2559 model_print("---------------------------------------------------------------------\n");
2562 #if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the execution as a Graphviz .dot file: mo-graph nodes plus
 * reads-from (red "rf") and sequenced-before (blue "sb") edges.
 * @param filename Base name; ".dot" is appended.
 * NOTE(review): the declaration of `buffer` is not visible in this truncated
 * excerpt.
 */
2563 void ModelChecker::dumpGraph(char *filename) {
2565 sprintf(buffer, "%s.dot",filename);
2566 FILE *file=fopen(buffer, "w");
2567 fprintf(file, "digraph %s {\n",filename);
/* Modification-order nodes/edges come from the mo graph itself */
2568 mo_graph->dumpNodes(file);
/* Tracks each thread's previous action so we can draw sb edges */
2569 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2571 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2572 ModelAction *action=*it;
2573 if (action->is_read()) {
2574 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2575 if (action->get_reads_from()!=NULL)
2576 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
/* sb edge from this thread's previous action, if any */
2578 if (thread_array[action->get_tid()] != NULL) {
2579 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2582 thread_array[action->get_tid()]=action;
2584 fprintf(file,"}\n");
2585 model_free(thread_array);
2590 /** @brief Prints an execution trace summary. */
2591 void ModelChecker::print_summary() const
2593 #if SUPPORT_MOD_ORDER_DUMP
/* When mo-dumping is compiled in, also write per-execution graph files
 * named by the running total execution count */
2595 char buffername[100];
2596 sprintf(buffername, "exec%04u", stats.num_total);
2597 mo_graph->dumpGraphToFile(buffername);
2598 sprintf(buffername, "graph%04u", stats.num_total);
2599 dumpGraph(buffername);
/* Flag executions whose prefix is not feasible before printing the trace */
2602 if (!isfeasibleprefix())
2603 model_print("INFEASIBLE EXECUTION!\n");
2604 print_list(action_trace, stats.num_total);
2609 * Add a Thread to the system for the first time. Should only be called once
2611 * @param t The Thread to add
2613 void ModelChecker::add_thread(Thread *t)
/* Register in the ID->Thread map, then hand to the scheduler */
2615 thread_map->put(id_to_int(t->get_id()), t);
2616 scheduler->add_thread(t);
2620 * Removes a thread from the scheduler.
2621 * @param the thread to remove.
/* Note: only the scheduler forgets the thread; thread_map keeps its entry */
2623 void ModelChecker::remove_thread(Thread *t)
2625 scheduler->remove_thread(t);
2629 * @brief Get a Thread reference by its ID
2630 * @param tid The Thread's ID
2631 * @return A Thread reference
2633 Thread * ModelChecker::get_thread(thread_id_t tid) const
/* Simple hash-table lookup on the integer thread ID */
2635 return thread_map->get(id_to_int(tid));
2639 * @brief Get a reference to the Thread in which a ModelAction was executed
2640 * @param act The ModelAction
2641 * @return A Thread reference
2643 Thread * ModelChecker::get_thread(ModelAction *act) const
/* Convenience overload: delegate via the action's thread ID */
2645 return get_thread(act->get_tid());
2649 * @brief Check if a Thread is currently enabled
2650 * @param t The Thread to check
2651 * @return True if the Thread is currently enabled
2653 bool ModelChecker::is_enabled(Thread *t) const
/* Enabled-ness is owned by the scheduler */
2655 return scheduler->is_enabled(t);
2659 * @brief Check if a Thread is currently enabled
2660 * @param tid The ID of the Thread to check
2661 * @return True if the Thread is currently enabled
2663 bool ModelChecker::is_enabled(thread_id_t tid) const
/* Overload by ID; same scheduler-owned state as the Thread* version */
2665 return scheduler->is_enabled(tid);
2669 * Switch from a user-context to the "master thread" context (a.k.a. system
2670 * context). This switch is made with the intention of exploring a particular
2671 * model-checking action (described by a ModelAction object). Must be called
2672 * from a user-thread context.
2674 * @param act The current action that will be explored. May be NULL only if
2675 * trace is exiting via an assertion (see ModelChecker::set_assert and
2676 * ModelChecker::has_asserted).
2677 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2679 int ModelChecker::switch_to_master(ModelAction *act)
2682 Thread *old = thread_current();
/* Publish the pending action for take_step() to process, mark the caller
 * ready, and context-switch into the system (scheduler) context */
2683 set_current_action(act);
2684 old->set_state(THREAD_READY);
2685 return Thread::swap(old, &system_context);
2689 * Takes the next step in the execution, if possible.
2690 * @return Returns true (success) if a step was taken and false otherwise.
/* NOTE(review): this listing is truncated — several early `return false`
 * exits, brace closings, and an assertion/halt path are not visible in this
 * excerpt; the comments below describe only what the visible lines show. */
2692 bool ModelChecker::take_step() {
/* Thread that produced the action published by switch_to_master(), if any */
2696 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2698 if (curr->get_state() == THREAD_READY) {
2699 ASSERT(priv->current_action);
/* Process the action; check_current_action may force the next thread */
2701 priv->nextThread = check_current_action(priv->current_action);
2702 priv->current_action = NULL;
/* Blocked/finished threads leave the scheduler's runnable set */
2704 if (curr->is_blocked() || curr->is_complete())
2705 scheduler->remove_thread(curr);
/* Ask the scheduler to pick the next thread (honoring any forced choice) */
2710 Thread *next = scheduler->next_thread(priv->nextThread);
2712 /* Infeasible -> don't take any more steps */
2713 if (is_infeasible())
/* Feasible prefix with recorded bugs: stop exploring this execution */
2715 else if (isfeasibleprefix() && have_bug_reports()) {
/* Optional per-execution step bound from the command-line parameters */
2720 if (params.bound != 0) {
2721 if (priv->used_sequence_numbers > params.bound) {
2726 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2727 next ? id_to_int(next->get_id()) : -1);
2730 * Launch end-of-execution release sequence fixups only when there are:
2732 * (1) no more user threads to run (or when execution replay chooses
2733 * the 'model_thread')
2734 * (2) pending release sequences
2735 * (3) pending assertions (i.e., data races)
2736 * (4) no pending promises
2738 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2739 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2740 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2741 pending_rel_seqs->size());
/* Synthesize a fixup action to resolve the lingering release sequences */
2742 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2743 std::memory_order_seq_cst, NULL, VALUE_NONE,
2745 set_current_action(fixup);
2749 /* next == NULL -> don't take any more steps */
2753 next->set_state(THREAD_RUNNING);
2755 if (next->get_pending() != NULL) {
2756 /* restart a pending action */
2757 set_current_action(next->get_pending());
2758 next->set_pending(NULL);
2759 next->set_state(THREAD_READY);
2763 /* Return false only if swap fails with an error */
2764 return (Thread::swap(&system_context, next) == 0);
2767 /** Wrapper to run the user's main function, with appropriate arguments */
2768 void user_main_wrapper(void *)
/* Forward the model-checker's recorded argc/argv to the user program */
2770 user_main(model->params.argc, model->params.argv);
2773 /** @brief Run ModelChecker for the user program */
/* Outer do/while replays executions until next_execution() reports that the
 * search space is exhausted. NOTE(review): the `do {` opener is not visible
 * in this truncated excerpt. */
2774 void ModelChecker::run()
2779 /* Start user program */
2780 add_thread(new Thread(&user_thread, &user_main_wrapper, NULL));
2782 /* Wait for all threads to complete */
/* Step the scheduler until take_step() declares this execution finished */
2783 while (take_step());
2784 } while (next_execution());