8 #include "snapshot-interface.h"
10 #include "clockvector.h"
11 #include "cyclegraph.h"
17 #define INITIAL_THREAD_ID 0
21 /** @brief Constructor */
// Builds the model checker: allocates every bookkeeping container on the
// heap (action trace, per-object and per-thread action maps, promise and
// pending-future-value vectors, the exploration NodeStack, and the
// modification-order CycleGraph) and clears the infeasibility flags.
// NOTE(review): some initializer-list entries are missing from this excerpt
// (gaps in the numbering) — confirm against the full file.
22 ModelChecker::ModelChecker(struct model_params params) :
23 /* Initialize default scheduler */
25 scheduler(new Scheduler()),
// Count of feasible executions seen so far; bumped in next_execution().
27 num_feasible_executions(0),
// Earliest divergence point since the last feasible execution (see
// get_next_thread()/next_execution()); NULL until a divergence is recorded.
29 earliest_diverge(NULL),
30 action_trace(new action_list_t()),
31 thread_map(new HashTable<int, Thread *, int>()),
// Per-location action lists, used for conflict / last-action searches.
32 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
// Threads blocked waiting on a mutex, keyed by the mutex's location.
33 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
// Per-location, per-thread action lists, used by the mo-graph builders.
34 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
35 promises(new std::vector<Promise *>()),
36 futurevalues(new std::vector<struct PendingFutureValue>()),
37 pending_rel_seqs(new std::vector<struct release_seq *>()),
38 thrd_last_action(new std::vector<ModelAction *>(1)),
39 node_stack(new NodeStack()),
40 mo_graph(new CycleGraph()),
// Infeasibility flags; reset between executions in reset_to_initial_state().
41 failed_promise(false),
42 too_many_reads(false),
44 bad_synchronization(false)
46 /* Allocate this "size" on the snapshotting heap */
// calloc zero-fills, so all priv members start at 0/NULL.
47 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
48 /* First thread created will have id INITIAL_THREAD_ID */
49 priv->next_thread_id = INITIAL_THREAD_ID;
52 /** @brief Destructor */
// Frees every Thread registered in thread_map, every outstanding Promise,
// and the heap-allocated bookkeeping containers created in the constructor.
// NOTE(review): several delete statements are missing from this excerpt
// (numbering gaps) — the full destructor presumably frees the remaining
// containers (obj_map, action_trace, etc.) as well; verify upstream.
53 ModelChecker::~ModelChecker()
55 for (unsigned int i = 0; i < get_num_threads(); i++)
56 delete thread_map->get(i);
61 delete lock_waiters_map;
// Promises are owned by this vector, so free the elements before the vector.
64 for (unsigned int i = 0; i < promises->size(); i++)
65 delete (*promises)[i];
68 delete pending_rel_seqs;
70 delete thrd_last_action;
77 * Restores user program to initial state and resets all model-checker data
// Called between executions: rewinds the NodeStack to the beginning,
// clears the per-execution infeasibility flags, and rolls the snapshotted
// user-program state back to step 0.
80 void ModelChecker::reset_to_initial_state()
82 DEBUG("+++ Resetting to initial state +++\n");
83 node_stack->reset_execution();
// Infeasibility is a property of a single execution; start the next one clean.
84 failed_promise = false;
85 too_many_reads = false;
86 bad_synchronization = false;
// Restore the user program's memory to the initial snapshot.
88 snapshotObject->backTrackBeforeStep(0);
91 /** @return a thread ID for a new Thread */
// Post-increment: returns the current id, then advances the counter, so
// thread ids are dense starting at INITIAL_THREAD_ID.
92 thread_id_t ModelChecker::get_next_id()
94 return priv->next_thread_id++;
97 /** @return the number of user threads created during this execution */
// next_thread_id doubles as the thread count because ids are assigned
// densely from INITIAL_THREAD_ID (0) upward in get_next_id().
98 unsigned int ModelChecker::get_num_threads()
100 return priv->next_thread_id;
103 /** @return The currently executing Thread. */
// Thin delegate: the Scheduler is the single source of truth for which
// thread is running.
104 Thread * ModelChecker::get_current_thread()
106 return scheduler->get_current_thread();
109 /** @return a sequence number for a new ModelAction */
// Pre-increment: the first sequence number handed out is 1, and
// used_sequence_numbers always holds the highest number issued so far
// (compared against promise expirations in promises_expired()).
110 modelclock_t ModelChecker::get_next_seq_num()
112 return ++priv->used_sequence_numbers;
116 * @brief Choose the next thread to execute.
118 * This function chooses the next thread that should execute. It can force the
119 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
120 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
121 * The model-checker may have no preference regarding the next thread (i.e.,
122 * when exploring a new execution ordering), in which case this will return
124 * @param curr The current ModelAction. This action might guide the choice of
126 * @return The next thread to run. If the model-checker has no preference, NULL.
128 Thread * ModelChecker::get_next_thread(ModelAction *curr)
// NOTE(review): several guard conditions and else/brace lines are missing
// from this excerpt (e.g. the condition protecting the two early returns
// below, and the end of the replay branch) — confirm against the full file.
133 /* Do not split atomic actions. */
135 return thread_current();
136 /* The THREAD_CREATE action points to the created Thread */
137 else if (curr->get_type() == THREAD_CREATE)
138 return (Thread *)curr->get_location();
141 /* Have we completed exploring the preselected path? */
145 /* Else, we are trying to replay an execution */
146 ModelAction *next = node_stack->get_next()->get_action();
// Reached the preselected divergence point: pick a different choice here.
148 if (next == diverge) {
// Track the earliest divergence since the last feasible execution, for
// the diagnostic printed by next_execution().
149 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
150 earliest_diverge=diverge;
152 Node *nextnode = next->get_node();
153 /* Reached divergence point */
// Try the alternatives in priority order: promise set, read-from choice,
// future value, release-sequence resolution, and finally a different
// thread interleaving. Each "increment_*" advances that node's iterator
// and returns true if an unexplored option remains.
154 if (nextnode->increment_promise()) {
155 /* The next node will try to satisfy a different set of promises. */
156 tid = next->get_tid();
// Keep the divergence node itself; discard only its successors.
157 node_stack->pop_restofstack(2);
158 } else if (nextnode->increment_read_from()) {
159 /* The next node will read from a different value. */
160 tid = next->get_tid();
161 node_stack->pop_restofstack(2);
162 } else if (nextnode->increment_future_value()) {
163 /* The next node will try to read from a different future value. */
164 tid = next->get_tid();
165 node_stack->pop_restofstack(2);
166 } else if (nextnode->increment_relseq_break()) {
167 /* The next node will try to resolve a release sequence differently */
168 tid = next->get_tid();
169 node_stack->pop_restofstack(2);
171 /* Make a different thread execute for next step */
172 Node *node = nextnode->get_parent();
173 tid = node->get_next_backtrack();
// Discard the divergence node too: the parent will re-expand with the
// backtracked thread instead.
174 node_stack->pop_restofstack(1);
175 if (diverge==earliest_diverge) {
176 earliest_diverge=node->get_action();
179 DEBUG("*** Divergence point ***\n");
// Replay path (next != diverge): simply run the recorded thread.
183 tid = next->get_tid();
185 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
186 ASSERT(tid != THREAD_ID_T_NONE);
187 return thread_map->get(id_to_int(tid));
191 * Queries the model-checker for more executions to explore and, if one
192 * exists, resets the model-checker state to execute a new execution.
194 * @return If there are more executions to explore, return true. Otherwise,
197 bool ModelChecker::next_execution()
// NOTE(review): lines are missing between the feasibility bookkeeping,
// the trace dump, and the final reset/return — confirm control flow
// (including the "return false" exit) against the full file.
203 if (isfinalfeasible()) {
// Report where exploration last diverged before reaching this feasible
// execution — useful for judging exploration progress.
204 printf("Earliest divergence point since last feasible execution:\n");
205 if (earliest_diverge)
206 earliest_diverge->print();
208 printf("(Not set)\n");
210 earliest_diverge = NULL;
211 num_feasible_executions++;
214 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
215 pending_rel_seqs->size());
217 if (isfinalfeasible() || DBG_ENABLED())
// No backtracking point left => the search space is exhausted.
220 if ((diverge = get_next_backtrack()) == NULL)
224 printf("Next execution will diverge at:\n");
228 reset_to_initial_state();
// Finds the most recent prior action at the same location that conflicts
// with @a act, used by set_backtracking() to place backtracking points.
// The definition of "conflict" depends on the action type:
//  - reads/writes (first case, header missing from this excerpt): any prior
//    action that could synchronize with act;
//  - trylock: any prior action holding a conflicting lock;
//  - unlock: a failed trylock from another thread.
// NOTE(review): the case labels before L126 and the returns/default are
// missing from this excerpt — confirm against the full file.
232 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
234 switch (act->get_type()) {
238 /* linear search: from most recent to oldest */
239 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
240 action_list_t::reverse_iterator rit;
241 for (rit = list->rbegin(); rit != list->rend(); rit++) {
242 ModelAction *prev = *rit;
243 if (prev->could_synchronize_with(act))
249 case ATOMIC_TRYLOCK: {
250 /* linear search: from most recent to oldest */
251 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
252 action_list_t::reverse_iterator rit;
253 for (rit = list->rbegin(); rit != list->rend(); rit++) {
254 ModelAction *prev = *rit;
255 if (act->is_conflicting_lock(prev))
260 case ATOMIC_UNLOCK: {
261 /* linear search: from most recent to oldest */
262 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
263 action_list_t::reverse_iterator rit;
264 for (rit = list->rbegin(); rit != list->rend(); rit++) {
265 ModelAction *prev = *rit;
// An unlock only conflicts with another thread's *failed* trylock: its
// outcome could change if reordered after this unlock.
266 if (!act->same_thread(prev)&&prev->is_failed_trylock())
277 /** This method find backtracking points where we should try to
278 * reorder the parameter ModelAction against.
280 * @param the ModelAction to find backtracking points for.
282 void ModelChecker::set_backtracking(ModelAction *act)
284 Thread *t = get_thread(act);
285 ModelAction * prev = get_last_conflict(act);
// NOTE(review): the NULL-check on prev and several closing braces are
// missing from this excerpt — confirm against the full file.
289 Node * node = prev->get_node()->get_parent();
291 int low_tid, high_tid;
// If act's own thread was enabled at the conflict point, only that thread
// needs a backtracking point; otherwise consider all threads.
292 if (node->is_enabled(t)) {
293 low_tid = id_to_int(act->get_tid());
294 high_tid = low_tid+1;
297 high_tid = get_num_threads();
300 for(int i = low_tid; i < high_tid; i++) {
301 thread_id_t tid = int_to_id(i);
302 if (!node->is_enabled(tid))
305 /* Check if this has been explored already */
306 if (node->has_been_explored(tid))
309 /* See if fairness allows */
// Under a fairness window, skip a non-priority thread whenever some
// enabled thread does have priority at this node.
310 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
312 for(int t=0;t<node->get_num_threads();t++) {
313 thread_id_t tother=int_to_id(t);
314 if (node->is_enabled(tother) && node->has_priority(tother)) {
323 /* Cache the latest backtracking point */
324 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
325 priv->next_backtrack = prev;
327 /* If this is a new backtracking point, mark the tree */
328 if (!node->set_backtrack(tid))
330 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
331 id_to_int(prev->get_tid()),
332 id_to_int(t->get_id()));
341 * Returns last backtracking point. The model checker will explore a different
342 * path for this point in the next execution.
343 * @return The ModelAction at which the next execution should diverge.
345 ModelAction * ModelChecker::get_next_backtrack()
// Consume the cached point: clear it so it is returned at most once.
347 ModelAction *next = priv->next_backtrack;
348 priv->next_backtrack = NULL;
353 * Processes a read or rmw model action.
354 * @param curr is the read model action to process.
355 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
356 * @return True if processing this read updates the mo_graph.
358 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
361 bool updated = false;
// Two paths: read from an existing write (tracked by the NodeStack), or
// read from a speculative "future value" backed by a Promise.
363 const ModelAction *reads_from = curr->get_node()->get_read_from();
364 if (reads_from != NULL) {
// Stage mo-graph edits so they can be rolled back if this read-from
// choice turns out infeasible.
365 mo_graph->startChanges();
367 value = reads_from->get_value();
368 bool r_status = false;
370 if (!second_part_of_rmw) {
371 check_recency(curr, reads_from);
372 r_status = r_modification_order(curr, reads_from);
// If this choice made the execution infeasible and an alternative
// read-from/future value exists, roll back and retry with it.
376 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
377 mo_graph->rollbackChanges();
378 too_many_reads = false;
382 curr->read_from(reads_from);
383 mo_graph->commitChanges();
384 mo_check_promises(curr->get_tid(), reads_from);
387 } else if (!second_part_of_rmw) {
388 /* Read from future value */
389 value = curr->get_node()->get_future_value();
390 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
// No concrete writer yet: record a Promise that some later write must
// fulfill (checked by resolve_promises()/promises_expired()).
391 curr->read_from(NULL);
392 Promise *valuepromise = new Promise(curr, value, expiration);
393 promises->push_back(valuepromise);
// The value read is what the user program observes as the return value.
395 get_thread(curr)->set_return_value(value);
401 * Processes a lock, trylock, or unlock model action. @param curr is
402 * the read model action to process.
404 * The try lock operation checks whether the lock is taken. If not,
405 * it falls to the normal lock operation case. If so, it returns
408 * The lock operation has already been checked that it is enabled, so
409 * it just grabs the lock and synchronizes with the previous unlock.
411 * The unlock operation has to re-enable all of the threads that are
412 * waiting on the lock.
414 * @return True if synchronization was updated; false otherwise
416 bool ModelChecker::process_mutex(ModelAction *curr) {
417 std::mutex *mutex = (std::mutex *)curr->get_location();
418 struct std::mutex_state *state = mutex->get_state();
419 switch (curr->get_type()) {
420 case ATOMIC_TRYLOCK: {
421 bool success = !state->islocked;
422 curr->set_try_lock(success);
// Failed trylock: return 0 to the user program and stop here.
424 get_thread(curr)->set_return_value(0);
427 get_thread(curr)->set_return_value(1);
429 //otherwise fall into the lock case
// Sanity check: the acquiring thread must have "seen" the mutex's
// initialization (its clock for the allocating thread exceeds the
// allocation clock); otherwise flag an access-before-init error.
431 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
432 printf("Lock access before initialization\n");
435 state->islocked = true;
436 ModelAction *unlock = get_last_unlock(curr);
437 //synchronize with the previous unlock statement
438 if (unlock != NULL) {
439 curr->synchronize_with(unlock);
444 case ATOMIC_UNLOCK: {
446 state->islocked = false;
447 //wake up the other threads
448 action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
449 //activate all the waiting threads
450 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
451 scheduler->wake(get_thread(*rit));
// NOTE(review): the waiter-list clear, case ATOMIC_LOCK label, breaks and
// the function's return are missing from this excerpt — confirm upstream.
463 * Process a write ModelAction
464 * @param curr The ModelAction to process
465 * @return True if the mo_graph was updated or promises were resolved
467 bool ModelChecker::process_write(ModelAction *curr)
469 bool updated_mod_order = w_modification_order(curr);
470 bool updated_promises = resolve_promises(curr);
// Only flush queued future values once no promises remain outstanding;
// each accepted future value may create a new backtracking point.
472 if (promises->size() == 0) {
473 for (unsigned int i = 0; i < futurevalues->size(); i++) {
474 struct PendingFutureValue pfv = (*futurevalues)[i];
475 if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) &&
476 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
477 priv->next_backtrack = pfv.act;
479 futurevalues->resize(0);
482 mo_graph->commitChanges();
483 mo_check_promises(curr->get_tid(), curr);
// Writes have no meaningful return value for the user program.
485 get_thread(curr)->set_return_value(VALUE_NONE);
486 return updated_mod_order || updated_promises;
490 * @brief Process the current action for thread-related activity
492 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
493 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
494 * synchronization, etc. This function is a no-op for non-THREAD actions
495 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
497 * @param curr The current action
498 * @return True if synchronization was updated or a thread completed
500 bool ModelChecker::process_thread_action(ModelAction *curr)
502 bool updated = false;
504 switch (curr->get_type()) {
505 case THREAD_CREATE: {
// The created Thread is carried in the action's location field.
506 Thread *th = (Thread *)curr->get_location();
507 th->set_creation(curr);
// NOTE(review): the THREAD_JOIN case label is missing from this excerpt.
511 Thread *waiting, *blocking;
512 waiting = get_thread(curr);
513 blocking = (Thread *)curr->get_location();
// Target still running: park the joiner on the target's wait list so
// THREAD_FINISH can wake it; otherwise complete the join immediately.
514 if (!blocking->is_complete()) {
515 blocking->push_wait_list(curr);
516 scheduler->sleep(waiting);
518 do_complete_join(curr);
519 updated = true; /* trigger rel-seq checks */
523 case THREAD_FINISH: {
524 Thread *th = get_thread(curr);
// Wake every thread that joined on us and complete their joins.
525 while (!th->wait_list_empty()) {
526 ModelAction *act = th->pop_wait_list();
527 Thread *wake = get_thread(act);
528 scheduler->wake(wake);
529 do_complete_join(act);
530 updated = true; /* trigger rel-seq checks */
533 updated = true; /* trigger rel-seq checks */
// NOTE(review): the THREAD_START case label appears to be missing here;
// the check_promises call below presumably belongs to it — confirm.
537 check_promises(curr->get_tid(), NULL, curr->get_cv());
548 * Initialize the current action by performing one or more of the following
549 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
550 * in the NodeStack, manipulating backtracking sets, allocating and
551 * initializing clock vectors, and computing the promises to fulfill.
553 * @param curr The current action, as passed from the user context; may be
554 * freed/invalidated after the execution of this function
555 * @return The current action, as processed by the ModelChecker. Is only the
556 * same as the parameter @a curr if this is a newly-explored action.
558 ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr)
560 ModelAction *newcurr;
// Second half of an RMW: fold it into the earlier RMWR action instead of
// creating a new node.
562 if (curr->is_rmwc() || curr->is_rmw()) {
563 newcurr = process_rmw(curr);
566 if (newcurr->is_rmw())
567 compute_promises(newcurr);
// NOTE(review): else-branch braces and the replay-vs-new-node condition
// around the lines below are missing from this excerpt — confirm upstream.
571 curr->set_seq_number(get_next_seq_num());
573 newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
575 /* First restore type and order in case of RMW operation */
577 newcurr->copy_typeandorder(curr);
579 ASSERT(curr->get_location() == newcurr->get_location());
580 newcurr->copy_from_new(curr);
582 /* Discard duplicate ModelAction; use action from NodeStack */
585 /* Always compute new clock vector */
586 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
590 /* Always compute new clock vector */
591 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
593 * Perform one-time actions when pushing new ModelAction onto
// Writes precompute which promises they might satisfy; release-sequence
// fixups precompute the writes that could break the sequence.
596 if (newcurr->is_write())
597 compute_promises(newcurr);
598 else if (newcurr->is_relseq_fixup())
599 compute_relseq_breakwrites(newcurr);
605 * This method checks whether a model action is enabled at the given point.
606 * At this point, it checks whether a lock operation would be successful at this point.
607 * If not, it puts the thread in a waiter list.
608 * @param curr is the ModelAction to check whether it is enabled.
609 * @return a bool that indicates whether the action is enabled.
611 bool ModelChecker::check_action_enabled(ModelAction *curr) {
// Only lock operations can be disabled; everything else is always enabled.
612 if (curr->is_lock()) {
613 std::mutex * lock = (std::mutex *)curr->get_location();
614 struct std::mutex_state * state = lock->get_state();
615 if (state->islocked) {
616 //Stick the action in the appropriate waiting queue
// The unlock path (process_mutex) wakes the threads queued here.
617 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
// NOTE(review): the "return false" / "return true" lines are missing from
// this excerpt — confirm against the full file.
626 * This is the heart of the model checker routine. It performs model-checking
627 * actions corresponding to a given "current action." Among other processes, it
628 * calculates reads-from relationships, updates synchronization clock vectors,
629 * forms a memory_order constraints graph, and handles replay/backtrack
630 * execution when running permutations of previously-observed executions.
632 * @param curr The current action to process
633 * @return The next Thread that must be executed. May be NULL if ModelChecker
634 * makes no choice (e.g., according to replay execution, combining RMW actions,
637 Thread * ModelChecker::check_current_action(ModelAction *curr)
641 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
// A disabled action (blocked lock) is deferred: stash it as "pending" on
// the thread, put the thread to sleep, and pick another thread to run.
643 if (!check_action_enabled(curr)) {
644 /* Make the execution look like we chose to run this action
645 * much later, when a lock is actually available to release */
646 get_current_thread()->set_pending(curr);
647 scheduler->sleep(get_current_thread());
648 return get_next_thread(NULL);
651 ModelAction *newcurr = initialize_curr_action(curr);
653 /* Add the action to lists before any other model-checking tasks */
654 if (!second_part_of_rmw)
655 add_action_to_lists(newcurr);
657 /* Build may_read_from set for newly-created actions */
// curr == newcurr only for genuinely new (non-replayed) actions.
658 if (curr == newcurr && curr->is_read())
659 build_reads_from_past(curr);
662 /* Initialize work_queue with the "current action" work */
// Fixed-point loop: processing one entry may enqueue release-sequence or
// mo-edge work, which runs until the queue drains.
663 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
665 while (!work_queue.empty()) {
666 WorkQueueEntry work = work_queue.front();
667 work_queue.pop_front();
670 case WORK_CHECK_CURR_ACTION: {
671 ModelAction *act = work.action;
672 bool update = false; /* update this location's release seq's */
673 bool update_all = false; /* update all release seq's */
675 if (process_thread_action(curr))
678 if (act->is_read() && process_read(act, second_part_of_rmw))
681 if (act->is_write() && process_write(act))
684 if (act->is_mutex_op() && process_mutex(act))
// update_all => recheck every pending release sequence (NULL location);
// update => only those at this action's location.
688 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
690 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
693 case WORK_CHECK_RELEASE_SEQ:
694 resolve_release_sequences(work.location, &work_queue);
696 case WORK_CHECK_MO_EDGES: {
697 /** @todo Complete verification of work_queue */
698 ModelAction *act = work.action;
699 bool updated = false;
701 if (act->is_read()) {
702 const ModelAction *rf = act->get_reads_from();
703 if (rf != NULL && r_modification_order(act, rf))
706 if (act->is_write()) {
707 if (w_modification_order(act))
710 mo_graph->commitChanges();
713 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// Finally record any new backtracking points for this action and hand
// control back to the scheduler via get_next_thread().
722 check_curr_backtracking(curr);
724 set_backtracking(curr);
726 return get_next_thread(curr);
730 * Complete a THREAD_JOIN operation, by synchronizing with the THREAD_FINISH
731 * operation from the Thread it is joining with. Must be called after the
732 * completion of the Thread in question.
733 * @param join The THREAD_JOIN action
735 void ModelChecker::do_complete_join(ModelAction *join)
// The joined-on Thread is carried in the join action's location field;
// its last recorded action is the THREAD_FINISH we synchronize with.
737 Thread *blocking = (Thread *)join->get_location();
738 ModelAction *act = get_last_action(blocking->get_id());
739 join->synchronize_with(act);
// If the current action's node (or its parent) still has any unexplored
// choice — backtrack thread, read-from, future value, promise set, or
// release-sequence resolution — and it is later than the currently cached
// backtracking point, make curr the next backtracking point.
742 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
743 Node *currnode = curr->get_node();
744 Node *parnode = currnode->get_parent();
746 if ((!parnode->backtrack_empty() ||
747 !currnode->read_from_empty() ||
748 !currnode->future_value_empty() ||
749 !currnode->promise_empty() ||
750 !currnode->relseq_break_empty())
751 && (!priv->next_backtrack ||
752 *curr > *priv->next_backtrack)) {
753 priv->next_backtrack = curr;
// @return true if any outstanding promise's expiration clock has passed
// the highest sequence number issued so far (making the trace infeasible).
// NOTE(review): the "return true/false" lines are missing from this excerpt.
757 bool ModelChecker::promises_expired() {
758 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
759 Promise *promise = (*promises)[promise_index];
760 if (promise->get_expiration()<priv->used_sequence_numbers) {
767 /** @return whether the current partial trace must be a prefix of a
// A prefix is certainly feasible only when nothing speculative remains:
// no unresolved promises and no pending release sequences.
769 bool ModelChecker::isfeasibleprefix() {
770 return promises->size() == 0 && pending_rel_seqs->size() == 0;
773 /** @return whether the current partial trace is feasible. */
// Feasible = no RMW violation in the mo-graph AND every other feasibility
// condition (see isfeasibleotherthanRMW). The DEBUG line only explains
// *why* when debugging is on; it does not change the result.
774 bool ModelChecker::isfeasible() {
775 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
776 DEBUG("Infeasible: RMW violation\n");
778 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
781 /** @return whether the current partial trace is feasible other than
782 * multiple RMW reading from the same store. */
// Checks every infeasibility source except RMW violations: mo-graph
// cycles, failed promises, the too-many-reads heuristic (check_recency),
// bad synchronization, and expired promises. The DEBUG lines are
// diagnostics only; the return expression re-evaluates the conditions.
783 bool ModelChecker::isfeasibleotherthanRMW() {
785 if (mo_graph->checkForCycles())
786 DEBUG("Infeasible: modification order cycles\n");
788 DEBUG("Infeasible: failed promise\n");
790 DEBUG("Infeasible: too many reads\n");
791 if (bad_synchronization)
792 DEBUG("Infeasible: bad synchronization ordering\n");
793 if (promises_expired())
794 DEBUG("Infeasible: promises expired\n");
796 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
799 /** Returns whether the current completed trace is feasible. */
// A *completed* trace additionally requires that every promise was
// resolved by some write.
800 bool ModelChecker::isfinalfeasible() {
801 if (DBG_ENABLED() && promises->size() != 0)
802 DEBUG("Infeasible: unrevolved promises\n");
804 return isfeasible() && promises->size() == 0;
807 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
// Merges the write half (@a act) into the thread's previous RMWR action
// and, for a successful RMW, records the atomic read-write adjacency as
// an RMW edge in the mo-graph. Returns the merged action (the old RMWR).
// NOTE(review): the final "return lastread;" line is missing from this
// excerpt — confirm upstream.
808 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
809 ModelAction *lastread = get_last_action(act->get_tid());
810 lastread->process_rmw(act);
811 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
812 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
813 mo_graph->commitChanges();
819 * Checks whether a thread has read from the same write for too many times
820 * without seeing the effects of a later write.
823 * 1) there must a different write that we could read from that would satisfy the modification order,
824 * 2) we must have read from the same value in excess of maxreads times, and
825 * 3) that other write must have been in the reads_from set for maxreads times.
827 * If so, we decide that the execution is no longer feasible.
829 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
// Entire check is gated on the user-supplied maxreads parameter (0 = off).
830 if (params.maxreads != 0) {
// With only one possible write to read from, there is no alternative,
// so the check cannot apply.
832 if (curr->get_node()->get_read_from_size() <= 1)
834 //Must make sure that execution is currently feasible... We could
835 //accidentally clear by rolling back
838 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
839 int tid = id_to_int(curr->get_tid());
842 if ((int)thrd_lists->size() <= tid)
844 action_list_t *list = &(*thrd_lists)[tid];
// Position the reverse iterator at curr within this thread's list.
846 action_list_t::reverse_iterator rit = list->rbegin();
848 for (; (*rit) != curr; rit++)
850 /* go past curr now */
853 action_list_t::reverse_iterator ritcopy = rit;
854 //See if we have enough reads from the same value
// Walk back maxreads actions; bail out early (lines missing in excerpt)
// unless all of them read from the same write rf.
856 for (; count < params.maxreads; rit++,count++) {
857 if (rit==list->rend())
859 ModelAction *act = *rit;
863 if (act->get_reads_from() != rf)
865 if (act->get_node()->get_read_from_size() <= 1)
// For each alternative write curr could have read from, test whether
// reading it would keep the execution feasible for all maxreads reads.
868 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
870 const ModelAction * write = curr->get_node()->get_read_from_at(i);
872 //Need a different write
876 /* Test to see whether this is a feasible write to read from*/
// Trial run: stage the mo edges, check feasibility, then roll back.
877 mo_graph->startChanges();
878 r_modification_order(curr, write);
879 bool feasiblereadfrom = isfeasible();
880 mo_graph->rollbackChanges();
882 if (!feasiblereadfrom)
886 bool feasiblewrite = true;
887 //new we need to see if this write works for everyone
889 for (int loop = count; loop>0; loop--,rit++) {
890 ModelAction *act=*rit;
891 bool foundvalue = false;
// NOTE(review): the loop variable 'j' is declared but the body indexes
// with 'i' (get_read_from_at(i)) — looks like a latent bug in the
// original; verify against upstream before changing.
892 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
893 if (act->get_node()->get_read_from_at(i)==write) {
899 feasiblewrite = false;
// A feasible alternative ignored maxreads times => flag infeasibility.
904 too_many_reads = true;
912 * Updates the mo_graph with the constraints imposed from the current
915 * Basic idea is the following: Go through each other thread and find
916 * the lastest action that happened before our read. Two cases:
918 * (1) The action is a write => that write must either occur before
919 * the write we read from or be the write we read from.
921 * (2) The action is a read => the write that that action read from
922 * must occur before the write we read from or be the same write.
924 * @param curr The current action. Must be a read.
925 * @param rf The action that curr reads from. Must be a write.
926 * @return True if modification order edges were added; false otherwise
928 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
930 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
933 ASSERT(curr->is_read());
935 /* Iterate over all threads */
936 for (i = 0; i < thrd_lists->size(); i++) {
937 /* Iterate over actions in thread, starting from most recent */
938 action_list_t *list = &(*thrd_lists)[i];
939 action_list_t::reverse_iterator rit;
940 for (rit = list->rbegin(); rit != list->rend(); rit++) {
941 ModelAction *act = *rit;
944 * Include at most one act per-thread that "happens
945 * before" curr. Don't consider reflexively.
947 if (act->happens_before(curr) && act != curr) {
// Case (1): a prior write must be mo-before the write we read from.
948 if (act->is_write()) {
950 mo_graph->addEdge(act, rf);
// Case (2): a prior read's source write must be mo-before rf.
954 const ModelAction *prevreadfrom = act->get_reads_from();
955 //if the previous read is unresolved, keep going...
956 if (prevreadfrom == NULL)
// Avoid a reflexive rf --mo--> rf edge.
959 if (rf != prevreadfrom) {
960 mo_graph->addEdge(prevreadfrom, rf);
// NOTE(review): the "added = true" bookkeeping, loop breaks, and the
// final return are missing from this excerpt — confirm upstream.
972 /** This method fixes up the modification order when we resolve a
973 * promises. The basic problem is that actions that occur after the
974 * read curr could not property add items to the modification order
977 * So for each thread, we find the earliest item that happens after
978 * the read curr. This is the item we have to fix up with additional
979 * constraints. If that action is write, we add a MO edge between
980 * the Action rf and that action. If the action is a read, we add a
981 * MO edge between the Action rf, and whatever the read accessed.
983 * @param curr is the read ModelAction that we are fixing up MO edges for.
984 * @param rf is the write ModelAction that curr reads from.
987 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
989 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
991 ASSERT(curr->is_read());
993 /* Iterate over all threads */
994 for (i = 0; i < thrd_lists->size(); i++) {
995 /* Iterate over actions in thread, starting from most recent */
996 action_list_t *list = &(*thrd_lists)[i];
997 action_list_t::reverse_iterator rit;
998 ModelAction *lastact = NULL;
1000 /* Find last action that happens after curr that is either not curr or a rmw */
// Reverse iteration means the last match found is the EARLIEST action
// in program order that happens after curr.
1001 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1002 ModelAction *act = *rit;
1003 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1009 /* Include at most one act per-thread that "happens before" curr */
1010 if (lastact != NULL) {
1011 if (lastact==curr) {
1012 //Case 1: The resolved read is a RMW, and we need to make sure
1013 //that the write portion of the RMW mod order after rf
1015 mo_graph->addEdge(rf, lastact);
1016 } else if (lastact->is_read()) {
1017 //Case 2: The resolved read is a normal read and the next
1018 //operation is a read, and we need to make sure the value read
1019 //is mod ordered after rf
1021 const ModelAction *postreadfrom = lastact->get_reads_from();
1022 if (postreadfrom != NULL&&rf != postreadfrom)
1023 mo_graph->addEdge(rf, postreadfrom);
1025 //Case 3: The resolved read is a normal read and the next
1026 //operation is a write, and we need to make sure that the
1027 //write is mod ordered after rf
1029 mo_graph->addEdge(rf, lastact);
1037 * Updates the mo_graph with the constraints imposed from the current write.
1039 * Basic idea is the following: Go through each other thread and find
1040 * the lastest action that happened before our write. Two cases:
1042 * (1) The action is a write => that write must occur before
1045 * (2) The action is a read => the write that that action read from
1046 * must occur before the current write.
1048 * This method also handles two other issues:
1050 * (I) Sequential Consistency: Making sure that if the current write is
1051 * seq_cst, that it occurs after the previous seq_cst write.
1053 * (II) Sending the write back to non-synchronizing reads.
1055 * @param curr The current action. Must be a write.
1056 * @return True if modification order edges were added; false otherwise
1058 bool ModelChecker::w_modification_order(ModelAction *curr)
1060 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1063 ASSERT(curr->is_write());
// Issue (I): seq_cst writes to a location are totally ordered, so at
// minimum this write is mo-after the previous seq_cst write.
1065 if (curr->is_seqcst()) {
1066 /* We have to at least see the last sequentially consistent write,
1067 so we are initialized. */
1068 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1069 if (last_seq_cst != NULL) {
1070 mo_graph->addEdge(last_seq_cst, curr);
1075 /* Iterate over all threads */
1076 for (i = 0; i < thrd_lists->size(); i++) {
1077 /* Iterate over actions in thread, starting from most recent */
1078 action_list_t *list = &(*thrd_lists)[i];
1079 action_list_t::reverse_iterator rit;
1080 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1081 ModelAction *act = *rit;
1084 * 1) If RMW and it actually read from something, then we
1085 * already have all relevant edges, so just skip to next
1088 * 2) If RMW and it didn't read from anything, we should
1089 * whatever edge we can get to speed up convergence.
1091 * 3) If normal write, we need to look at earlier actions, so
1092 * continue processing list.
1094 if (curr->is_rmw()) {
1095 if (curr->get_reads_from()!=NULL)
1104 * Include at most one act per-thread that "happens
1107 if (act->happens_before(curr)) {
1109 * Note: if act is RMW, just add edge:
1111 * The following edge should be handled elsewhere:
1112 * readfrom(act) --mo--> act
1114 if (act->is_write())
1115 mo_graph->addEdge(act, curr);
1116 else if (act->is_read()) {
1117 //if previous read accessed a null, just keep going
1118 if (act->get_reads_from() == NULL)
1120 mo_graph->addEdge(act->get_reads_from(), curr);
// Issue (II): a concurrent, non-synchronizing read in another thread
// could potentially observe this write in some other interleaving —
// queue it as a pending future value (bounded by maxfuturedelay).
1124 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1125 !act->same_thread(curr)) {
1126 /* We have an action that:
1127 (1) did not happen before us
1128 (2) is a read and we are a write
1129 (3) cannot synchronize with us
1130 (4) is in a different thread
1132 that read could potentially read from our write.
1134 if (thin_air_constraint_may_allow(curr, act)) {
1136 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1137 struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act};
1138 futurevalues->push_back(pfv);
// NOTE(review): the "added = true" bookkeeping, several condition lines
// (e.g. the isfeasible() guard at 1135) and the final return are missing
// from this excerpt — confirm against the full file.
1148 /** Arbitrary reads from the future are not allowed. Section 29.3
1149 * part 9 places some constraints. This method checks one result of constraint
1150 * constraint. Others require compiler support. */
// Returns whether @a reader may read from the (future) write @a writer
// under the C++11 thin-air restriction: the check only bites when both
// are RMWs; it then walks writer's reads-from chain and rejects the pair
// if the chain reaches reader, or passes through an action in reader's
// thread that happens-before reader. NOTE(review): the early "return
// true" / "return false" lines are missing from this excerpt.
1151 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1152 if (!writer->is_rmw())
1155 if (!reader->is_rmw())
// Follow the RMW chain backward from the writer.
1158 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1159 if (search == reader)
1161 if (search->get_tid() == reader->get_tid() &&
1162 search->happens_before(reader))
1170 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1171 * The ModelAction under consideration is expected to be taking part in
1172 * release/acquire synchronization as an object of the "reads from" relation.
1173 * Note that this can only provide release sequence support for RMW chains
1174 * which do not read from the future, as those actions cannot be traced until
1175 * their "promise" is fulfilled. Similarly, we may not even establish the
1176 * presence of a release sequence with certainty, as some modification order
1177 * constraints may be decided further in the future. Thus, this function
1178 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1179 * and a boolean representing certainty.
1181 * @todo Finish lazy updating, when promises are fulfilled in the future
1182 * @param rf The action that might be part of a release sequence. Must be a
1184 * @param release_heads A pass-by-reference style return parameter. After
1185 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exist with certainty
1187 * @param pending A pass-by-reference style return parameter which is only used
1188 * when returning false (i.e., uncertain). Returns most information regarding
1189 * an uncertain release sequence, including any write operations that might
1190 * break the sequence.
1191 * @return true, if the ModelChecker is certain that release_heads is complete;
bool ModelChecker::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())

	/* rf must be a write (possibly an RMW) for a release sequence to exist */
	ASSERT(rf->is_write());

	if (rf->is_release())
		release_heads->push_back(rf);
		break; /* End of RMW chain */

	/** @todo Need to be smarter here... In the linux lock
	 * example, this will run to the beginning of the program for

	/** @todo The way to be smarter here is to keep going until 1
	 * thread has a release preceded by an acquire and you've seen

	/* acq_rel RMW is a sufficient stopping condition */
	if (rf->is_acquire() && rf->is_release())
		return true; /* complete */

	/* Follow the RMW reads-from chain backward */
	rf = rf->get_reads_from();

	/* read from future: need to settle this later */
	return false; /* incomplete */

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write; check modification order for contiguous subsequence
	 * -> rf must be same thread as release */
	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last write/release */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_release())
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	/* Start with a clean slate of potential sequence-breaking writes */
	pending->writes.clear();

	bool certain = true;
	/* Check every other thread for writes that might interpose between
	 * 'release' and 'rf' in modification order */
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		/* Skip the thread that performed rf/release itself */
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
				!scheduler->is_enabled(th) ||
			future_ordered = true;

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;

			/* Only writes can break release sequences */
			if (!act->is_write())

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
					mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			/* act may break release sequence */
			pending->writes.push_back(act);

		if (!future_ordered)
			certain = false; /* This thread is uncertain */

	/* Certain: report the release head and clear the pending state */
	release_heads->push_back(release);
	pending->writes.clear();

	/* Uncertain: record the candidate release for lazy re-checking */
	pending->release = release;
1321 * A public interface for getting the release sequence head(s) with which a
1322 * given ModelAction must synchronize. This function only returns a non-empty
1323 * result when it can locate a release sequence head with certainty. Otherwise,
1324 * it may mark the internal state of the ModelChecker so that it will handle
1325 * the release sequence at a later time, causing @a act to update its
1326 * synchronization at some later point in execution.
1327 * @param act The 'acquire' action that may read from a release sequence
1328 * @param release_heads A pass-by-reference return parameter. Will be filled
 * with the head(s) of the release sequence(s), if they exist with certainty.
1330 * @see ModelChecker::release_seq_heads
void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
	const ModelAction *rf = act->get_reads_from();
	/* Allocated on the snapshotting heap; kept alive only if the sequence
	 * must be re-checked lazily, otherwise freed below */
	struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
	sequence->acquire = act;

	if (!release_seq_heads(rf, release_heads, sequence)) {
		/* add act to 'lazy checking' list */
		pending_rel_seqs->push_back(sequence);
		snapshot_free(sequence);
1347 * Attempt to resolve all stashed operations that might synchronize with a
1348 * release sequence for a given location. This implements the "lazy" portion of
1349 * determining whether or not a release sequence was contiguous, since not all
1350 * modification order information is present at the time an action occurs.
1352 * @param location The location/object that should be checked for release
1353 * sequence resolutions. A NULL value means to check all locations.
1354 * @param work_queue The work queue to which to add work items as they are
1356 * @return True if any updates occurred (new synchronization, new mo_graph
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
	bool updated = false;
	/* Walk the pending list, erasing entries as they are resolved */
	std::vector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *act = pending->acquire;

		/* Only resolve sequences on the given location, if provided */
		if (location && act->get_location() != location) {

		const ModelAction *rf = act->get_reads_from();
		rel_heads_list_t release_heads;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with every newly-found release head */
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!act->has_synchronized_with(release_heads[i])) {
				if (act->synchronize_with(release_heads[i]))
					set_bad_synchronization();

		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check act for mo_graph edges */
		work_queue->push_back(MOEdgeWorkEntry(act));

		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		for (; (*rit) != act; rit++) {
			ModelAction *propagate = *rit;
			if (act->happens_before(propagate)) {
				propagate->synchronize_with(act);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));

		/* Fully resolved: drop the pending entry */
		it = pending_rel_seqs->erase(it);
		snapshot_free(pending);

	// If we resolved promises or data races, see if we have realized a data race.
	if (checkDataRaces()) {
1420 * Performs various bookkeeping operations for the current ModelAction. For
1421 * instance, adds action to the per-object, per-thread action vector and to the
1422 * action trace list of all thread actions.
1424 * @param act is the ModelAction to add.
void ModelChecker::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	/* Global trace of all actions */
	action_trace->push_back(act);

	/* Per-object action list */
	obj_map->get_safe_ptr(act->get_location())->push_back(act);

	/* Per-object, per-thread action list; grow the vector of per-thread
	 * lists if this thread has not touched the object before */
	std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);

	/* Record act as this thread's most recent action */
	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;
1444 * @brief Get the last action performed by a particular Thread
1445 * @param tid The thread ID of the Thread in question
1446 * @return The last action in the thread
ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
	int threadid = id_to_int(tid);
	/* Only threads that have acted have an entry in thrd_last_action */
	if (threadid < (int)thrd_last_action->size())
		return (*thrd_last_action)[id_to_int(tid)];
1458 * Gets the last memory_order_seq_cst write (in the total global sequence)
1459 * performed on a particular object (i.e., memory location), not including the
1461 * @param curr The current ModelAction; also denotes the object location to
1463 * @return The last seq_cst write
ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map->get_safe_ptr(location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* Scan backward (most recent first), skipping curr itself */
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1478 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1479 * location). This function identifies the mutex according to the current
1480 * action, which is presumed to perform on the same mutex.
1481 * @param curr The current ModelAction; also denotes the object location to
1483 * @return The last unlock operation
ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map->get_safe_ptr(location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* Scan backward (most recent first) for the latest unlock */
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_unlock())
/** @return The parent action: the thread's last action, or — presumably
 * when there is none — the action that created the thread (TODO confirm
 * the guard condition, which is not visible in this view) */
ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
	ModelAction *parent = get_last_action(tid);
	parent = get_thread(tid)->get_creation();
1506 * Returns the clock vector for a given thread.
1507 * @param tid The thread whose clock vector we want
1508 * @return Desired clock vector
ClockVector * ModelChecker::get_cv(thread_id_t tid)
	/* The clock vector is stored on the thread's parent action */
	return get_parent_action(tid)->get_cv();
1516 * Resolve a set of Promises with a current write. The set is provided in the
1517 * Node corresponding to @a write.
1518 * @param write The ModelAction that is fulfilling Promises
1519 * @return True if promises were resolved; false otherwise
bool ModelChecker::resolve_promises(ModelAction *write)
	bool resolved = false;
	std::vector<thread_id_t> threads_to_check;

	/* i indexes the Node's promise set; promise_index tracks the position
	 * in the promises vector, which shrinks as promises are erased */
	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			ModelAction *read = promise->get_action();
			if (read->is_rmw()) {
				mo_graph->addRMWEdge(write, read);
			read->read_from(write);
			//First fix up the modification order for actions that happened
			//before this read
			r_modification_order(read, write);
			//Next fix up the modification order for actions that happened
			//after this read
			post_r_modification_order(read, write);
			//Make sure the promise's value matches the write's value
			ASSERT(promise->get_value() == write->get_value());
			promises->erase(promises->begin() + promise_index);
			threads_to_check.push_back(read->get_tid());

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for(unsigned int i=0;i<threads_to_check.size();i++)
		mo_check_promises(threads_to_check[i], write);
1562 * Compute the set of promises that could potentially be satisfied by this
1563 * action. Note that the set computation actually appears in the Node, not in
1565 * @param curr The ModelAction that may satisfy promises
void ModelChecker::compute_promises(ModelAction *curr)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* curr may satisfy this promise if the promised read did not
		 * happen before curr, cannot synchronize with curr, is in a
		 * different thread, and expects exactly the value curr writes */
		if (!act->happens_before(curr) &&
				!act->could_synchronize_with(curr) &&
				!act->same_thread(curr) &&
				promise->get_value() == curr->get_value()) {
			curr->get_node()->set_promise(i);
1582 /** Checks promises in response to change in ClockVector Threads. */
void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* Did this merge newly synchronize tid with the promised read? */
		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
				merge_cv->synchronized_since(act)) {
			if (promise->increment_threads(tid)) {
				//Promise has failed
				failed_promise = true;
1599 /** Checks promises in response to addition to modification order for threads.
1601 * pthread is the thread that performed the read that created the promise
1603 * pread is the read that created the promise
 * pwrite is either the first write to the same location as pread by
 * pthread that is sequenced after pread or the value read by the
 * first read to the same location as pread by pthread that is
 * sequenced after pread.
1610 * 1. If tid=pthread, then we check what other threads are reachable
1611 * through the mode order starting with pwrite. Those threads cannot
1612 * perform a write that will resolve the promise due to modification
1613 * order constraints.
1615 * 2. If the tid is not pthread, we check whether pwrite can reach the
1616 * action write through the modification order. If so, that thread
 * cannot perform a future write that will resolve the promise due to
 * modification order constraints.
 * @param tid The thread that either read from the model action
 * write, or actually did the model action write.
 * @param write The ModelAction representing the relevant write.
void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
	void * location = write->get_location();
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();

		//Is this promise on the same location?
		if ( act->get_location() != location )

		//same thread as the promise
		if ( act->get_tid()==tid ) {

			//do we have a pwrite for the promise, if not, set it
			if (promise->get_write() == NULL ) {
				promise->set_write(write);
				/* If the pwrite itself dooms the promise, flag failure */
				if (mo_graph->checkPromise(write, promise)) {
					failed_promise = true;

		//Don't do any lookups twice for the same thread
		if (promise->has_sync_thread(tid))

		/* pwrite --mo--> write: this thread can no longer resolve
		 * the promise */
		if (mo_graph->checkReachable(promise->get_write(), write)) {
			if (promise->increment_threads(tid)) {
				failed_promise = true;
1663 * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
1667 * @param curr The current ModelAction. Must be a release sequence fixup
void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
	/* Nothing pending: no break-writes to record */
	if (pending_rel_seqs->empty())

	struct release_seq *pending = pending_rel_seqs->back();
	/* Offer each potentially sequence-breaking write as a choice */
	for (unsigned int i = 0; i < pending->writes.size(); i++) {
		const ModelAction *write = pending->writes[i];
		curr->get_node()->add_relseq_break(write);

	/* NULL means don't break the sequence; just synchronize */
	curr->get_node()->add_relseq_break(NULL);
1686 * Build up an initial set of all past writes that this 'read' action may read
1687 * from. This set is determined by the clock vector's "happens before"
1689 * @param curr is the current ModelAction that we are exploring; it must be a
void ModelChecker::build_reads_from_past(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());

	ASSERT(curr->is_read());

	ModelAction *last_seq_cst = NULL;

	/* Track whether this object has been initialized */
	bool initialized = false;

	if (curr->is_seqcst()) {
		last_seq_cst = get_last_seq_cst(curr);
		/* We have to at least see the last sequentially consistent write,
		   so we are initialized. */
		if (last_seq_cst != NULL)

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
				DEBUG("Adding action to may_read_from:\n");
				if (DBG_ENABLED()) {
				curr->get_node()->add_read_from(act);

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr)) {

	/** @todo Need a more informative way of reporting errors. */
	printf("ERROR: may read from uninitialized atomic\n");

	/* Debug dump of the computed may-read-from set */
	if (DBG_ENABLED() || !initialized) {
		printf("Reached read action:\n");
		printf("Printing may_read_from\n");
		curr->get_node()->print_may_read_from();
		printf("End printing may_read_from\n");

	ASSERT(initialized);
/** @brief Print an action list (e.g., the action trace), bracketed by
 * horizontal separator lines */
static void print_list(action_list_t *list)
	action_list_t::iterator it;

	printf("---------------------------------------------------------------------\n");

	/* Print each action in list order */
	for (it = list->begin(); it != list->end(); it++) {

	printf("---------------------------------------------------------------------\n");
1770 #if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the modification-order graph plus rf/sb edges as Graphviz
 * DOT to "<filename>.dot".
 * NOTE(review): no fclose(file) is visible in this view — confirm the
 * stream is closed before returning. */
void ModelChecker::dumpGraph(char *filename) {
	sprintf(buffer, "%s.dot",filename);
	FILE *file=fopen(buffer, "w");
	fprintf(file, "digraph %s {\n",filename);
	mo_graph->dumpNodes(file);
	/* thread_array[tid] tracks each thread's most recent action, used to
	 * draw sequenced-before (sb) edges; freed via model_free below */
	ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action=*it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
			/* reads-from edge, in red */
			if (action->get_reads_from()!=NULL)
				fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		/* sequenced-before edge from the thread's previous action, in blue */
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());

		thread_array[action->get_tid()]=action;

	fprintf(file,"}\n");
	model_free(thread_array);
/** @brief Print execution statistics and the current action trace */
void ModelChecker::print_summary()
	printf("Number of executions: %d\n", num_executions);
	printf("Number of feasible executions: %d\n", num_feasible_executions);
	printf("Total nodes created: %d\n", node_stack->get_total_nodes());

#if SUPPORT_MOD_ORDER_DUMP
	/* Dump per-execution modification-order graphs when enabled */
	char buffername[100];
	sprintf(buffername, "exec%04u", num_executions);
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", num_executions);
	dumpGraph(buffername);

	if (!isfinalfeasible())
		printf("INFEASIBLE EXECUTION!\n");
	print_list(action_trace);
1821 * Add a Thread to the system for the first time. Should only be called once
1823 * @param t The Thread to add
void ModelChecker::add_thread(Thread *t)
	/* Register the thread in the map and hand it to the scheduler */
	thread_map->put(id_to_int(t->get_id()), t);
	scheduler->add_thread(t);
1832 * Removes a thread from the scheduler.
 * @param t The Thread to remove.
void ModelChecker::remove_thread(Thread *t)
	/* Note: only removed from the scheduler, not from thread_map */
	scheduler->remove_thread(t);
1841 * @brief Get a Thread reference by its ID
1842 * @param tid The Thread's ID
1843 * @return A Thread reference
Thread * ModelChecker::get_thread(thread_id_t tid) const
	/* Look the Thread up in the id -> Thread map */
	return thread_map->get(id_to_int(tid));
1851 * @brief Get a reference to the Thread in which a ModelAction was executed
1852 * @param act The ModelAction
1853 * @return A Thread reference
Thread * ModelChecker::get_thread(ModelAction *act) const
	/* Delegate to the thread_id_t overload using the action's tid */
	return get_thread(act->get_tid());
1861 * Switch from a user-context to the "master thread" context (a.k.a. system
1862 * context). This switch is made with the intention of exploring a particular
1863 * model-checking action (described by a ModelAction object). Must be called
1864 * from a user-thread context.
1866 * @param act The current action that will be explored. May be NULL only if
1867 * trace is exiting via an assertion (see ModelChecker::set_assert and
1868 * ModelChecker::has_asserted).
1869 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
int ModelChecker::switch_to_master(ModelAction *act)
	Thread *old = thread_current();
	/* Stash the action for the master to process, then yield */
	set_current_action(act);
	old->set_state(THREAD_READY);
	return Thread::swap(old, &system_context);
1881 * Takes the next step in the execution, if possible.
1882 * @return Returns true (success) if a step was taken and false otherwise.
bool ModelChecker::take_step() {
	Thread *curr = thread_current();
	if (curr->get_state() == THREAD_READY) {
		ASSERT(priv->current_action);

		/* Process the stashed action; it may suggest the next thread */
		priv->nextThread = check_current_action(priv->current_action);
		priv->current_action = NULL;

		if (curr->is_blocked() || curr->is_complete())
			scheduler->remove_thread(curr);

	Thread *next = scheduler->next_thread(priv->nextThread);

	/* Infeasible -> don't take any more steps */

	DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
			next ? id_to_int(next->get_id()) : -1);

	/* next == NULL -> don't take any more steps */

	next->set_state(THREAD_RUNNING);

	if (next->get_pending() != NULL) {
		/* restart a pending action */
		set_current_action(next->get_pending());
		next->set_pending(NULL);
		next->set_state(THREAD_READY);

	/* Return false only if swap fails with an error */
	return (Thread::swap(&system_context, next) == 0);
/** Runs the current execution until there are no more steps to take. */
void ModelChecker::finish_execution() {
	/* Drive the scheduler until take_step() reports no further steps */
	while (take_step());