8 #include "snapshot-interface.h"
10 #include "clockvector.h"
11 #include "cyclegraph.h"
16 #define INITIAL_THREAD_ID 0
/** @brief Constructor */
ModelChecker::ModelChecker(struct model_params params) :
	/* Initialize default scheduler */
	scheduler(new Scheduler()),
	/* Counts fully-feasible executions seen so far */
	num_feasible_executions(0),
	/* No divergence point recorded yet */
	earliest_diverge(NULL),
	action_trace(new action_list_t()),
	/* Map thread id -> Thread object */
	thread_map(new HashTable<int, Thread *, int>()),
	/* Per-location action lists */
	obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
	/* Per-lock lists of actions blocked waiting on that lock */
	lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
	/* Per-location, per-thread action lists */
	obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
	/* Outstanding promises (reads from the future awaiting resolution) */
	promises(new std::vector<Promise *>()),
	futurevalues(new std::vector<struct PendingFutureValue>()),
	pending_acq_rel_seq(new std::vector<ModelAction *>()),
	/* One slot initially; grows as threads are created */
	thrd_last_action(new std::vector<ModelAction *>(1)),
	node_stack(new NodeStack()),
	/* Modification-order constraint graph */
	mo_graph(new CycleGraph()),
	/* Infeasibility flags start clear; set during exploration */
	failed_promise(false),
	too_many_reads(false),
	bad_synchronization(false)
	/* Allocate this "size" on the snapshotting heap */
	priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
	/* First thread created will have id INITIAL_THREAD_ID */
	priv->next_thread_id = INITIAL_THREAD_ID;
/** @brief Destructor */
ModelChecker::~ModelChecker()
	/* Threads are owned by thread_map; delete each before the map */
	for (int i = 0; i < get_num_threads(); i++)
		delete thread_map->get(i);
	delete lock_waiters_map;
	/* Promise objects are heap-allocated; free elements before the vector */
	for (unsigned int i = 0; i < promises->size(); i++)
		delete (*promises)[i];
	delete pending_acq_rel_seq;
	delete thrd_last_action;
/**
 * Restores user program to initial state and resets all model-checker data
 * structures so that a new execution can begin.
 */
void ModelChecker::reset_to_initial_state()
	DEBUG("+++ Resetting to initial state +++\n");
	node_stack->reset_execution();
	/* Clear per-execution infeasibility flags */
	failed_promise = false;
	too_many_reads = false;
	bad_synchronization = false;
	/* Roll the snapshotting heap back to the pre-execution snapshot */
	snapshotObject->backTrackBeforeStep(0);
90 /** @return a thread ID for a new Thread */
91 thread_id_t ModelChecker::get_next_id()
93 return priv->next_thread_id++;
96 /** @return the number of user threads created during this execution */
97 int ModelChecker::get_num_threads()
99 return priv->next_thread_id;
102 /** @return a sequence number for a new ModelAction */
103 modelclock_t ModelChecker::get_next_seq_num()
105 return ++priv->used_sequence_numbers;
/**
 * @brief Choose the next thread to execute.
 *
 * This function chooses the next thread that should execute. It can force the
 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
 * The model-checker may have no preference regarding the next thread (i.e.,
 * when exploring a new execution ordering), in which case this will return
 * NULL.
 * @param curr The current ModelAction. This action might guide the choice of
 * next thread.
 * @return The next thread to run. If the model-checker has no preference, NULL.
 */
Thread * ModelChecker::get_next_thread(ModelAction *curr)
	/* Do not split atomic actions. */
		return thread_current();
	/* The THREAD_CREATE action points to the created Thread */
	else if (curr->get_type() == THREAD_CREATE)
		return (Thread *)curr->get_location();

	/* Have we completed exploring the preselected path? */

	/* Else, we are trying to replay an execution */
	ModelAction *next = node_stack->get_next()->get_action();

	if (next == diverge) {
		Node *nextnode = next->get_node();
		/* Reached divergence point; pick the next alternative to explore */
		if (nextnode->increment_promise()) {
			/* The next node will try to satisfy a different set of promises. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_read_from()) {
			/* The next node will read from a different value. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_future_value()) {
			/* The next node will try to read from a different future value. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
			/* Make a different thread execute for next step */
			Node *node = nextnode->get_parent();
			tid = node->get_next_backtrack();
			node_stack->pop_restofstack(1);
		DEBUG("*** Divergence point ***\n");
		/* Not yet at the divergence point: replay the recorded thread choice */
		tid = next->get_tid();
	DEBUG("*** ModelChecker chose next thread = %d ***\n", tid);
	ASSERT(tid != THREAD_ID_T_NONE);
	return thread_map->get(id_to_int(tid));
/**
 * Queries the model-checker for more executions to explore and, if one
 * exists, resets the model-checker state to execute a new execution.
 *
 * @return If there are more executions to explore, return true. Otherwise,
 * return false.
 */
bool ModelChecker::next_execution()
	if (isfinalfeasible()) {
		/* Report where this feasible execution diverged from the last one */
		printf("Earliest divergence point since last feasible execution:\n");
		if (earliest_diverge)
			earliest_diverge->print(false);
			printf("(Not set)\n");

		earliest_diverge = NULL;
		num_feasible_executions++;

	if (isfinalfeasible() || DBG_ENABLED())

	/* No backtracking point left means exploration is complete */
	if ((diverge = get_next_backtrack()) == NULL)

	/* Track the earliest divergence point (by sequence order) */
	if (earliest_diverge == NULL || *diverge < *earliest_diverge)
		earliest_diverge=diverge;

		printf("Next execution will diverge at:\n");
	reset_to_initial_state();
/**
 * Find the most recent action that conflicts with @a act, for use as a
 * backtracking point. The notion of "conflict" depends on the action type
 * (synchronizing access, lock acquisition, or failed trylock).
 */
ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
	switch (act->get_type()) {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map->get_safe_ptr(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_synchronizing(prev))
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map->get_safe_ptr(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A lock/trylock on the same mutex conflicts with this trylock */
			if (act->is_conflicting_lock(prev))
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map->get_safe_ptr(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A failed trylock in another thread could succeed after this unlock */
			if (!act->same_thread(prev)&&prev->is_failed_trylock())
/** This method find backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param the ModelAction to find backtracking points for.
 */
void ModelChecker::set_backtracking(ModelAction *act)
	Thread *t = get_thread(act);
	ModelAction * prev = get_last_conflict(act);
	/* Backtrack at the choice point just before the conflicting action */
	Node * node = prev->get_node()->get_parent();

	/* Range of thread ids to consider reordering against prev */
	int low_tid, high_tid;
	if (node->is_enabled(t)) {
		/* Only act's own thread need be considered */
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid+1;
		/* Otherwise consider every thread */
		high_tid = get_num_threads();

	for(int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);
		/* Skip threads that cannot run at this node */
		if (!node->is_enabled(tid))

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))

		/* See if fairness allows */
		if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
			for(int t=0;t<node->get_num_threads();t++) {
				thread_id_t tother=int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {

		/* Cache the latest backtracking point */
		if (!priv->next_backtrack || *prev > *priv->next_backtrack)
			priv->next_backtrack = prev;

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
				prev->get_tid(), t->get_id());
321 * Returns last backtracking point. The model checker will explore a different
322 * path for this point in the next execution.
323 * @return The ModelAction at which the next execution should diverge.
325 ModelAction * ModelChecker::get_next_backtrack()
327 ModelAction *next = priv->next_backtrack;
328 priv->next_backtrack = NULL;
/**
 * Processes a read or rmw model action.
 * @param curr is the read model action to process.
 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
	bool updated = false;
	const ModelAction *reads_from = curr->get_node()->get_read_from();
	if (reads_from != NULL) {
		/* Tentatively apply mo edges so they can be rolled back below */
		mo_graph->startChanges();

		value = reads_from->get_value();
		bool r_status = false;

		if (!second_part_of_rmw) {
			check_recency(curr, reads_from);
			r_status = r_modification_order(curr, reads_from);
		/* If this read choice made the trace infeasible, try the next
		 * read-from (or future value) candidate and undo the mo edges */
		if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
			mo_graph->rollbackChanges();
			too_many_reads = false;
		curr->read_from(reads_from);
		mo_graph->commitChanges();
	} else if (!second_part_of_rmw) {
		/* Read from future value */
		value = curr->get_node()->get_future_value();
		modelclock_t expiration = curr->get_node()->get_future_value_expiration();
		curr->read_from(NULL);
		/* Record a promise that some later write must supply this value */
		Promise *valuepromise = new Promise(curr, value, expiration);
		promises->push_back(valuepromise);
	get_thread(curr)->set_return_value(value);
/**
 * Processes a lock, trylock, or unlock model action. @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken. If not,
 * it falls to the normal lock operation case. If so, it returns
 * failure.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelChecker::process_mutex(ModelAction *curr) {
	std::mutex *mutex = (std::mutex *)curr->get_location();
	struct std::mutex_state *state = mutex->get_state();
	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds only if the mutex is currently free */
		bool success = !state->islocked;
		curr->set_try_lock(success);
			get_thread(curr)->set_return_value(0);
		get_thread(curr)->set_return_value(1);
		//otherwise fall into the lock case
		/* Detect use of the mutex before its construction was visible */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
			printf("Lock access before initialization\n");
		state->islocked = true;
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			curr->synchronize_with(unlock);
	case ATOMIC_UNLOCK: {
		state->islocked = false;
		//wake up the other threads
		action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelChecker::process_write(ModelAction *curr)
	bool updated_mod_order = w_modification_order(curr);
	bool updated_promises = resolve_promises(curr);

	/* Only send future values once all promises are resolved */
	if (promises->size() == 0) {
		for (unsigned int i = 0; i < futurevalues->size(); i++) {
			struct PendingFutureValue pfv = (*futurevalues)[i];
			/* If a new future value was added, it may create a new
			 * backtracking point; cache the latest one */
			if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) &&
					(!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
				priv->next_backtrack = pfv.act;
		futurevalues->resize(0);

	mo_graph->commitChanges();
	/* Writes have no meaningful return value */
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc. This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated
 */
bool ModelChecker::process_thread_action(ModelAction *curr)
	bool synchronized = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* The created Thread is carried in the action's location field */
		Thread *th = (Thread *)curr->get_location();
		th->set_creation(curr);
		/* Join synchronizes with the last action of the joined thread */
		Thread *blocking = (Thread *)curr->get_location();
		ModelAction *act = get_last_action(blocking->get_id());
		curr->synchronize_with(act);
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake every thread blocked in a join on this thread */
		while (!th->wait_list_empty()) {
			ModelAction *act = th->pop_wait_list();
			scheduler->wake(get_thread(act));
		check_promises(NULL, curr->get_cv());
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function
 * @return The current action, as processed by the ModelChecker. Is only the
 * same as the parameter @a curr if this is a newly-explored action.
 */
ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr)
	ModelAction *newcurr;

	if (curr->is_rmwc() || curr->is_rmw()) {
		/* Second part of an RMW: merge into the existing RMWR action */
		newcurr = process_rmw(curr);
		if (newcurr->is_rmw())
			compute_promises(newcurr);
		curr->set_seq_number(get_next_seq_num());

		/* Step forward in the NodeStack; may return a previously-explored action */
		newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
			/* First restore type and order in case of RMW operation */
			newcurr->copy_typeandorder(curr);

			ASSERT(curr->get_location() == newcurr->get_location());
			newcurr->copy_from_new(curr);

			/* Discard duplicate ModelAction; use action from NodeStack */

			/* Always compute new clock vector */
			newcurr->create_cv(get_parent_action(newcurr->get_tid()));
			/* Always compute new clock vector */
			newcurr->create_cv(get_parent_action(newcurr->get_tid()));
			/* Perform one-time actions when pushing new ModelAction onto
			 * the NodeStack */
			if (newcurr->is_write())
				compute_promises(newcurr);
/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether a lock or join operation would be successful (i.e., is the
 * lock already locked, or is the joined thread already complete). If not, put
 * the action in a waiter list.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelChecker::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		std::mutex * lock = (std::mutex *)curr->get_location();
		struct std::mutex_state * state = lock->get_state();
		if (state->islocked) {
			//Stick the action in the appropriate waiting queue
			lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
	} else if (curr->get_type() == THREAD_JOIN) {
		Thread *blocking = (Thread *)curr->get_location();
		if (!blocking->is_complete()) {
			/* Joined thread still running: block until THREAD_FINISH wakes us */
			blocking->push_wait_list(curr);
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The next Thread that must be executed. May be NULL if ModelChecker
 * makes no choice (e.g., according to replay execution, combining RMW actions,
 * etc.)
 */
Thread * ModelChecker::check_current_action(ModelAction *curr)
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();

	if (!check_action_enabled(curr)) {
		/* Make the execution look like we chose to run this action
		 * much later, when a lock/join can succeed */
		get_current_thread()->set_pending(curr);
		scheduler->sleep(get_current_thread());
		return get_next_thread(NULL);

	ModelAction *newcurr = initialize_curr_action(curr);

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(newcurr);

	/* Build may_read_from set for newly-created actions */
	if (curr == newcurr && curr->is_read())
		build_reads_from_past(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));

	/* Drain the work queue; entries may enqueue further work */
	while (!work_queue.empty()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))

			if (act->is_read() && process_read(act, second_part_of_rmw))

			if (act->is_write() && process_write(act))

			if (act->is_mutex_op() && process_mutex(act))

				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				if (rf != NULL && r_modification_order(act, rf))
			if (act->is_write()) {
				if (w_modification_order(act))
			mo_graph->commitChanges();

				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return get_next_thread(curr);
702 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
703 Node *currnode = curr->get_node();
704 Node *parnode = currnode->get_parent();
706 if ((!parnode->backtrack_empty() ||
707 !currnode->read_from_empty() ||
708 !currnode->future_value_empty() ||
709 !currnode->promise_empty())
710 && (!priv->next_backtrack ||
711 *curr > *priv->next_backtrack)) {
712 priv->next_backtrack = curr;
/** @return true if any outstanding promise has expired (its expiration
 * clock is earlier than the current sequence number) */
bool ModelChecker::promises_expired() {
	for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
		Promise *promise = (*promises)[promise_index];
		if (promise->get_expiration()<priv->used_sequence_numbers) {
726 /** @return whether the current partial trace must be a prefix of a
728 bool ModelChecker::isfeasibleprefix() {
729 return promises->size() == 0 && pending_acq_rel_seq->size() == 0;
732 /** @return whether the current partial trace is feasible. */
733 bool ModelChecker::isfeasible() {
734 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
735 DEBUG("Infeasible: RMW violation\n");
737 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
/** @return whether the current partial trace is feasible other than
 * multiple RMW reading from the same store. */
bool ModelChecker::isfeasibleotherthanRMW() {
	/* Debug-only diagnostics naming the specific infeasibility cause */
	if (mo_graph->checkForCycles())
		DEBUG("Infeasible: modification order cycles\n");
		DEBUG("Infeasible: failed promise\n");
		DEBUG("Infeasible: too many reads\n");
	if (bad_synchronization)
		DEBUG("Infeasible: bad synchronization ordering\n");
	if (promises_expired())
		DEBUG("Infeasible: promises expired\n");

	return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
758 /** Returns whether the current completed trace is feasible. */
759 bool ModelChecker::isfinalfeasible() {
760 if (DBG_ENABLED() && promises->size() != 0)
761 DEBUG("Infeasible: unrevolved promises\n");
763 return isfeasible() && promises->size() == 0;
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelChecker::process_rmw(ModelAction *act) {
	int tid = id_to_int(act->get_tid());
	/* The previous action in this thread is the RMWR half being closed out */
	ModelAction *lastread = get_last_action(tid);
	lastread->process_rmw(act);
	if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
		/* RMW must be mo-adjacent to the write it read from */
		mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
		mo_graph->commitChanges();
/**
 * Checks whether a thread has read from the same write for too many times
 * without seeing the effects of a later write.
 *
 * Conditions:
 * 1) there must a different write that we could read from that would satisfy the modification order,
 * 2) we must have read from the same value in excess of maxreads times, and
 * 3) that other write must have been in the reads_from set for maxreads times.
 *
 * If so, we decide that the execution is no longer feasible.
 */
void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
	if (params.maxreads != 0) {
		/* With only one read-from candidate there is nothing to rotate to */
		if (curr->get_node()->get_read_from_size() <= 1)
		//Must make sure that execution is currently feasible... We could
		//accidentally clear by rolling back
		std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
		int tid = id_to_int(curr->get_tid());

		/* Thread has not yet touched this location */
		if ((int)thrd_lists->size() <= tid)
		action_list_t *list = &(*thrd_lists)[tid];

		action_list_t::reverse_iterator rit = list->rbegin();
		/* Skip back to curr in this thread's per-location list */
		for (; (*rit) != curr; rit++)
		/* go past curr now */

		action_list_t::reverse_iterator ritcopy = rit;
		//See if we have enough reads from the same value
		for (; count < params.maxreads; rit++,count++) {
			if (rit==list->rend())
			ModelAction *act = *rit;
			/* A read from a different write breaks the streak */
			if (act->get_reads_from() != rf)
			if (act->get_node()->get_read_from_size() <= 1)
		for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
			const ModelAction * write = curr->get_node()->get_read_from_at(i);

			//Need a different write

			/* Test to see whether this is a feasible write to read from*/
			mo_graph->startChanges();
			r_modification_order(curr, write);
			bool feasiblereadfrom = isfeasible();
			mo_graph->rollbackChanges();

			if (!feasiblereadfrom)

			bool feasiblewrite = true;
			//new we need to see if this write works for everyone

			for (int loop = count; loop>0; loop--,rit++) {
				ModelAction *act=*rit;
				bool foundvalue = false;
				for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
					/* NOTE(review): inner loop iterates j but indexes with
					 * i (the outer loop variable) — likely should be
					 * get_read_from_at(j); confirm against upstream */
					if (act->get_node()->get_read_from_at(i)==write) {
					feasiblewrite = false;
				too_many_reads = true;
/**
 * Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the lastest action that happened before our read. Two cases:
 *
 * (1) The action is a write => that write must either occur before
 * the write we read from or be the write we read from.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The action that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
	ASSERT(curr->is_read());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr. Don't consider reflexively.
			 */
			if (act->happens_before(curr) && act != curr) {
				if (act->is_write()) {
					/* Case (1): act --mo--> rf */
					mo_graph->addEdge(act, rf);
					/* Case (2): readfrom(act) --mo--> rf */
					const ModelAction *prevreadfrom = act->get_reads_from();
					if (prevreadfrom != NULL && rf != prevreadfrom) {
						mo_graph->addEdge(prevreadfrom, rf);
/** This method fixes up the modification order when we resolve a
 * promises. The basic problem is that actions that occur after the
 * read curr could not property add items to the modification order
 * while the promise was outstanding.
 *
 * So for each thread, we find the earliest item that happens after
 * the read curr. This is the item we have to fix up with additional
 * constraints. If that action is write, we add a MO edge between
 * the Action rf and that action. If the action is a read, we add a
 * MO edge between the Action rf, and whatever the read accessed.
 *
 * @param curr is the read ModelAction that we are fixing up MO edges for.
 * @param rf is the write ModelAction that curr reads from.
 */
void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
	ASSERT(curr->is_read());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		ModelAction *lastact = NULL;

		/* Find last action that happens after curr that is either not curr or a rmw */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {

		/* Include at most one act per-thread that "happens before" curr */
		if (lastact != NULL) {
				//Case 1: The resolved read is a RMW, and we need to make sure
				//that the write portion of the RMW mod order after rf
				mo_graph->addEdge(rf, lastact);
			} else if (lastact->is_read()) {
				//Case 2: The resolved read is a normal read and the next
				//operation is a read, and we need to make sure the value read
				//is mod ordered after rf
				const ModelAction *postreadfrom = lastact->get_reads_from();
				if (postreadfrom != NULL&&rf != postreadfrom)
					mo_graph->addEdge(rf, postreadfrom);
				//Case 3: The resolved read is a normal read and the next
				//operation is a write, and we need to make sure that the
				//write is mod ordered after rf
				mo_graph->addEdge(rf, lastact);
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the lastest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelChecker::w_modification_order(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst(curr);
		if (last_seq_cst != NULL) {
			mo_graph->addEdge(last_seq_cst, curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/*
			 * If RMW, we already have all relevant edges,
			 * so just skip to next thread.
			 * If normal write, we need to look at earlier
			 * actions, so continue processing list.
			 */

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					mo_graph->addEdge(act, curr);
				else if (act->is_read() && act->get_reads_from() != NULL)
					mo_graph->addEdge(act->get_reads_from(), curr);
			} else if (act->is_read() && !act->is_synchronizing(curr) &&
					!act->same_thread(curr)) {
				/* We have an action that:
					 (1) did not happen before us
					 (2) is a read and we are a write
					 (3) cannot synchronize with us
					 (4) is in a different thread
					 =>
					 that read could potentially read from our write.
				 */
				if (thin_air_constraint_may_allow(curr, act)) {
							(curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
						/* Queue this write as a future value for the read */
						struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act};
						futurevalues->push_back(pfv);
/** Arbitrary reads from the future are not allowed. Section 29.3
 * part 9 places some constraints. This method checks one result of constraint
 * constraint. Others require compiler support. */
bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
	/* Constraint only applies when both sides are RMWs */
	if (!writer->is_rmw())
	if (!reader->is_rmw())
	/* Walk the writer's reads-from chain looking for the reader */
	for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
		if (search == reader)
		if (search->get_tid() == reader->get_tid() &&
				search->happens_before(reader))
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @todo Finish lazy updating, when promises are fulfilled in the future
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists
 * @return true, if the ModelChecker is certain that release_heads is complete;
 * false otherwise
 */
bool ModelChecker::release_seq_head(const ModelAction *rf, rel_heads_list_t *release_heads) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())

	ASSERT(rf->is_write());

		/* Every release along the RMW chain heads a release sequence */
		if (rf->is_release())
			release_heads->push_back(rf);
			break; /* End of RMW chain */

		/** @todo Need to be smarter here... In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */

		/* Walk back along the RMW chain */
		rf = rf->get_reads_from();

		/* read from future: need to settle this later */
		return false; /* incomplete */

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write; check modification order for contiguous subsequence
	 * -> rf must be same thread as release */
	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last write/release */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_release())
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	/* Check every other thread for writes that could break the sequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		if (last && (rf->happens_before(last) ||
				last->get_type() == THREAD_FINISH))
			future_ordered = true;

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;

			/* Only writes can break release sequences */
			if (!act->is_write())

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
					mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */

		if (!future_ordered)
			return false; /* This thread is uncertain */

	release_heads->push_back(release);
1247 * A public interface for getting the release sequence head(s) with which a
1248 * given ModelAction must synchronize. This function only returns a non-empty
1249 * result when it can locate a release sequence head with certainty. Otherwise,
1250 * it may mark the internal state of the ModelChecker so that it will handle
1251 * the release sequence at a later time, causing @a act to update its
1252 * synchronization at some later point in execution.
1253 * @param act The 'acquire' action that may read from a release sequence
1254 * @param release_heads A pass-by-reference return parameter. Will be filled
1255 * with the head(s) of the release sequence(s), if they exists with certainty.
1256 * @see ModelChecker::release_seq_head
1258 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1260 const ModelAction *rf = act->get_reads_from();
1262 complete = release_seq_head(rf, release_heads);
1264 /* add act to 'lazy checking' list */
1265 pending_acq_rel_seq->push_back(act);
1270 * Attempt to resolve all stashed operations that might synchronize with a
1271 * release sequence for a given location. This implements the "lazy" portion of
1272 * determining whether or not a release sequence was contiguous, since not all
1273 * modification order information is present at the time an action occurs.
1275 * @param location The location/object that should be checked for release
1276 * sequence resolutions. A NULL value means to check all locations.
1277 * @param work_queue The work queue to which to add work items as they are
1279 * @return True if any updates occurred (new synchronization, new mo_graph
1282 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1284 bool updated = false;
1285 std::vector<ModelAction *>::iterator it = pending_acq_rel_seq->begin();
1286 while (it != pending_acq_rel_seq->end()) {
1287 ModelAction *act = *it;
1289 /* Only resolve sequences on the given location, if provided */
1290 if (location && act->get_location() != location) {
1295 const ModelAction *rf = act->get_reads_from();
1296 rel_heads_list_t release_heads;
1298 complete = release_seq_head(rf, &release_heads);
1299 for (unsigned int i = 0; i < release_heads.size(); i++) {
1300 if (!act->has_synchronized_with(release_heads[i])) {
1301 if (act->synchronize_with(release_heads[i]))
1304 set_bad_synchronization();
1309 /* Re-check act for mo_graph edges */
1310 work_queue->push_back(MOEdgeWorkEntry(act));
1312 /* propagate synchronization to later actions */
1313 action_list_t::reverse_iterator rit = action_trace->rbegin();
1314 for (; (*rit) != act; rit++) {
1315 ModelAction *propagate = *rit;
1316 if (act->happens_before(propagate)) {
1317 propagate->synchronize_with(act);
1318 /* Re-check 'propagate' for mo_graph edges */
1319 work_queue->push_back(MOEdgeWorkEntry(propagate));
1324 it = pending_acq_rel_seq->erase(it);
1329 // If we resolved promises or data races, see if we have realized a data race.
1330 if (checkDataRaces()) {
1338 * Performs various bookkeeping operations for the current ModelAction. For
1339 * instance, adds action to the per-object, per-thread action vector and to the
1340 * action trace list of all thread actions.
1342 * @param act is the ModelAction to add.
1344 void ModelChecker::add_action_to_lists(ModelAction *act)
1346 int tid = id_to_int(act->get_tid());
1347 action_trace->push_back(act);
1349 obj_map->get_safe_ptr(act->get_location())->push_back(act);
1351 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
1352 if (tid >= (int)vec->size())
1353 vec->resize(priv->next_thread_id);
1354 (*vec)[tid].push_back(act);
1356 if ((int)thrd_last_action->size() <= tid)
1357 thrd_last_action->resize(get_num_threads());
1358 (*thrd_last_action)[tid] = act;
1362 * @brief Get the last action performed by a particular Thread
1363 * @param tid The thread ID of the Thread in question
1364 * @return The last action in the thread
1366 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1368 int threadid = id_to_int(tid);
1369 if (threadid < (int)thrd_last_action->size())
1370 return (*thrd_last_action)[id_to_int(tid)];
1376 * Gets the last memory_order_seq_cst write (in the total global sequence)
1377 * performed on a particular object (i.e., memory location), not including the
1379 * @param curr The current ModelAction; also denotes the object location to
1381 * @return The last seq_cst write
1383 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1385 void *location = curr->get_location();
1386 action_list_t *list = obj_map->get_safe_ptr(location);
1387 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1388 action_list_t::reverse_iterator rit;
1389 for (rit = list->rbegin(); rit != list->rend(); rit++)
1390 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1396 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1397 * location). This function identifies the mutex according to the current
1398 * action, which is presumed to perform on the same mutex.
1399 * @param curr The current ModelAction; also denotes the object location to
1401 * @return The last unlock operation
1403 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1405 void *location = curr->get_location();
1406 action_list_t *list = obj_map->get_safe_ptr(location);
1407 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1408 action_list_t::reverse_iterator rit;
1409 for (rit = list->rbegin(); rit != list->rend(); rit++)
1410 if ((*rit)->is_unlock())
1415 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1417 ModelAction *parent = get_last_action(tid);
1419 parent = get_thread(tid)->get_creation();
1424 * Returns the clock vector for a given thread.
1425 * @param tid The thread whose clock vector we want
1426 * @return Desired clock vector
1428 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1430 return get_parent_action(tid)->get_cv();
1434 * Resolve a set of Promises with a current write. The set is provided in the
1435 * Node corresponding to @a write.
1436 * @param write The ModelAction that is fulfilling Promises
1437 * @return True if promises were resolved; false otherwise
1439 bool ModelChecker::resolve_promises(ModelAction *write)
1441 bool resolved = false;
1443 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1444 Promise *promise = (*promises)[promise_index];
1445 if (write->get_node()->get_promise(i)) {
1446 ModelAction *read = promise->get_action();
1447 if (read->is_rmw()) {
1448 mo_graph->addRMWEdge(write, read);
1450 read->read_from(write);
1451 //First fix up the modification order for actions that happened
1453 r_modification_order(read, write);
1454 //Next fix up the modification order for actions that happened
1456 post_r_modification_order(read, write);
1457 //Make sure the promise's value matches the write's value
1458 ASSERT(promise->get_value() == write->get_value());
1460 promises->erase(promises->begin() + promise_index);
1469 * Compute the set of promises that could potentially be satisfied by this
1470 * action. Note that the set computation actually appears in the Node, not in
1472 * @param curr The ModelAction that may satisfy promises
1474 void ModelChecker::compute_promises(ModelAction *curr)
1476 for (unsigned int i = 0; i < promises->size(); i++) {
1477 Promise *promise = (*promises)[i];
1478 const ModelAction *act = promise->get_action();
1479 if (!act->happens_before(curr) &&
1481 !act->is_synchronizing(curr) &&
1482 !act->same_thread(curr) &&
1483 promise->get_value() == curr->get_value()) {
1484 curr->get_node()->set_promise(i);
1489 /** Checks promises in response to change in ClockVector Threads. */
1490 void ModelChecker::check_promises(ClockVector *old_cv, ClockVector *merge_cv)
1492 for (unsigned int i = 0; i < promises->size(); i++) {
1493 Promise *promise = (*promises)[i];
1494 const ModelAction *act = promise->get_action();
1495 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1496 merge_cv->synchronized_since(act)) {
1497 //This thread is no longer able to send values back to satisfy the promise
1498 int num_synchronized_threads = promise->increment_threads();
1499 if (num_synchronized_threads == get_num_threads()) {
1500 //Promise has failed
1501 failed_promise = true;
1509 * Build up an initial set of all past writes that this 'read' action may read
1510 * from. This set is determined by the clock vector's "happens before"
1512 * @param curr is the current ModelAction that we are exploring; it must be a
1515 void ModelChecker::build_reads_from_past(ModelAction *curr)
1517 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1519 ASSERT(curr->is_read());
1521 ModelAction *last_seq_cst = NULL;
1523 /* Track whether this object has been initialized */
1524 bool initialized = false;
1526 if (curr->is_seqcst()) {
1527 last_seq_cst = get_last_seq_cst(curr);
1528 /* We have to at least see the last sequentially consistent write,
1529 so we are initialized. */
1530 if (last_seq_cst != NULL)
1534 /* Iterate over all threads */
1535 for (i = 0; i < thrd_lists->size(); i++) {
1536 /* Iterate over actions in thread, starting from most recent */
1537 action_list_t *list = &(*thrd_lists)[i];
1538 action_list_t::reverse_iterator rit;
1539 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1540 ModelAction *act = *rit;
1542 /* Only consider 'write' actions */
1543 if (!act->is_write() || act == curr)
1546 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1547 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
1548 DEBUG("Adding action to may_read_from:\n");
1549 if (DBG_ENABLED()) {
1553 curr->get_node()->add_read_from(act);
1556 /* Include at most one act per-thread that "happens before" curr */
1557 if (act->happens_before(curr)) {
1565 /** @todo Need a more informative way of reporting errors. */
1566 printf("ERROR: may read from uninitialized atomic\n");
1569 if (DBG_ENABLED() || !initialized) {
1570 printf("Reached read action:\n");
1572 printf("Printing may_read_from\n");
1573 curr->get_node()->print_may_read_from();
1574 printf("End printing may_read_from\n");
1577 ASSERT(initialized);
1580 static void print_list(action_list_t *list)
1582 action_list_t::iterator it;
1584 printf("---------------------------------------------------------------------\n");
1587 for (it = list->begin(); it != list->end(); it++) {
1590 printf("---------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the current execution as a Graphviz 'dot' file
 *
 * Writes "<filename>.dot" containing the modification-order nodes plus
 * reads-from (red) and sequenced-before (blue) edges.
 * @param filename Base name for the output file (without extension)
 */
void ModelChecker::dumpGraph(char *filename) {
	char buffer[200];
	/* snprintf: guard against overly long filenames */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);

	/* Track each thread's most recent action, to draw 'sb' edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action = *it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
			fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		}
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
		}
		thread_array[action->get_tid()] = action;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
1620 void ModelChecker::print_summary()
1623 printf("Number of executions: %d\n", num_executions);
1624 printf("Number of feasible executions: %d\n", num_feasible_executions);
1625 printf("Total nodes created: %d\n", node_stack->get_total_nodes());
1627 #if SUPPORT_MOD_ORDER_DUMP
1629 char buffername[100];
1630 sprintf(buffername, "exec%04u", num_executions);
1631 mo_graph->dumpGraphToFile(buffername);
1632 sprintf(buffername, "graph%04u", num_executions);
1633 dumpGraph(buffername);
1636 if (!isfinalfeasible())
1637 printf("INFEASIBLE EXECUTION!\n");
1638 print_list(action_trace);
1643 * Add a Thread to the system for the first time. Should only be called once
1645 * @param t The Thread to add
1647 void ModelChecker::add_thread(Thread *t)
1649 thread_map->put(id_to_int(t->get_id()), t);
1650 scheduler->add_thread(t);
1654 * Removes a thread from the scheduler.
1655 * @param the thread to remove.
1657 void ModelChecker::remove_thread(Thread *t)
1659 scheduler->remove_thread(t);
1663 * Switch from a user-context to the "master thread" context (a.k.a. system
1664 * context). This switch is made with the intention of exploring a particular
1665 * model-checking action (described by a ModelAction object). Must be called
1666 * from a user-thread context.
1667 * @param act The current action that will be explored. Must not be NULL.
1668 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
1670 int ModelChecker::switch_to_master(ModelAction *act)
1673 Thread *old = thread_current();
1674 set_current_action(act);
1675 old->set_state(THREAD_READY);
1676 return Thread::swap(old, &system_context);
1680 * Takes the next step in the execution, if possible.
1681 * @return Returns true (success) if a step was taken and false otherwise.
1683 bool ModelChecker::take_step() {
1687 Thread * curr = thread_current();
1689 if (curr->get_state() == THREAD_READY) {
1690 ASSERT(priv->current_action);
1692 priv->nextThread = check_current_action(priv->current_action);
1693 priv->current_action = NULL;
1695 if (curr->is_blocked() || curr->is_complete())
1696 scheduler->remove_thread(curr);
1701 Thread * next = scheduler->next_thread(priv->nextThread);
1703 /* Infeasible -> don't take any more steps */
1708 next->set_state(THREAD_RUNNING);
1709 DEBUG("(%d, %d)\n", curr ? curr->get_id() : -1, next ? next->get_id() : -1);
1711 /* next == NULL -> don't take any more steps */
1715 if ( next->get_pending() != NULL ) {
1716 //restart a pending action
1717 set_current_action(next->get_pending());
1718 next->set_pending(NULL);
1719 next->set_state(THREAD_READY);
1723 /* Return false only if swap fails with an error */
1724 return (Thread::swap(&system_context, next) == 0);
1727 /** Runs the current execution until threre are no more steps to take. */
1728 void ModelChecker::finish_execution() {
1731 while (take_step());