8 #include "snapshot-interface.h"
10 #include "clockvector.h"
11 #include "cyclegraph.h"
16 #define INITIAL_THREAD_ID 0
20 /** @brief Constructor */
// NOTE(review): this listing is truncated (gaps in the embedded line numbers);
// some initializer-list entries and the braces are not visible in this view.
21 ModelChecker::ModelChecker(struct model_params params) :
22 /* Initialize default scheduler */
23 scheduler(new Scheduler()),
25 num_feasible_executions(0),
// Core bookkeeping containers, all heap-allocated and owned by ModelChecker
// (released in the destructor).
28 action_trace(new action_list_t()),
29 thread_map(new HashTable<int, Thread *, int>()),
30 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
31 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
32 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
33 promises(new std::vector<Promise *>()),
34 futurevalues(new std::vector<struct PendingFutureValue>()),
35 lazy_sync_with_release(new HashTable<void *, action_list_t, uintptr_t, 4>()),
36 thrd_last_action(new std::vector<ModelAction *>(1)),
37 node_stack(new NodeStack()),
38 mo_graph(new CycleGraph()),
39 failed_promise(false),
40 too_many_reads(false),
43 /* Allocate this "size" on the snapshotting heap */
44 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
45 /* First thread created will have id INITIAL_THREAD_ID */
46 priv->next_thread_id = INITIAL_THREAD_ID;
// Alias into the snapshotted struct so the counter is rolled back with it.
48 lazy_sync_size = &priv->lazy_sync_size;
51 /** @brief Destructor */
// NOTE(review): listing is truncated — several container deletions are not
// visible here; confirm the full destructor against the complete source.
52 ModelChecker::~ModelChecker()
// Free every Thread object registered in the thread map.
54 for (int i = 0; i < get_num_threads(); i++)
55 delete thread_map->get(i);
60 delete lock_waiters_map;
// Promises are owned by this class; release each before the vector.
63 for (unsigned int i = 0; i < promises->size(); i++)
64 delete (*promises)[i];
67 delete lazy_sync_with_release;
69 delete thrd_last_action;
76 * Restores user program to initial state and resets all model-checker data
79 void ModelChecker::reset_to_initial_state()
81 DEBUG("+++ Resetting to initial state +++\n");
// Clear per-execution exploration state before restarting.
82 node_stack->reset_execution();
83 failed_promise = false;
84 too_many_reads = false;
// Roll the snapshot back to step 0, restoring the user program's memory.
86 snapshotObject->backTrackBeforeStep(0);
89 /** @return a thread ID for a new Thread */
90 thread_id_t ModelChecker::get_next_id()
// Post-increment: IDs are handed out sequentially from INITIAL_THREAD_ID.
92 return priv->next_thread_id++;
95 /** @return the number of user threads created during this execution */
96 int ModelChecker::get_num_threads()
// next_thread_id doubles as the count, since IDs start at 0 and are sequential.
98 return priv->next_thread_id;
101 /** @return a sequence number for a new ModelAction */
102 modelclock_t ModelChecker::get_next_seq_num()
// Pre-increment: sequence numbers start at 1 (0 is never handed out).
104 return ++priv->used_sequence_numbers;
108 * @brief Choose the next thread to execute.
110 * This function chooses the next thread that should execute. It can force the
111 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
112 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
113 * The model-checker may have no preference regarding the next thread (i.e.,
114 * when exploring a new execution ordering), in which case this will return
116 * @param curr The current ModelAction. This action might guide the choice of
118 * @return The next thread to run. If the model-checker has no preference, NULL.
// NOTE(review): listing is truncated — some conditions/branches are missing
// from this view; confirm control flow against the complete source.
120 Thread * ModelChecker::get_next_thread(ModelAction *curr)
125 /* Do not split atomic actions. */
127 return thread_current();
128 /* The THREAD_CREATE action points to the created Thread */
129 else if (curr->get_type() == THREAD_CREATE)
130 return (Thread *)curr->get_location();
133 /* Have we completed exploring the preselected path? */
137 /* Else, we are trying to replay an execution */
138 ModelAction *next = node_stack->get_next()->get_action();
140 if (next == diverge) {
141 Node *nextnode = next->get_node();
142 /* Reached divergence point */
// At the divergence point, try the alternatives in priority order:
// promises, then read-from values, then future values, then a different
// thread interleaving from the parent node.
143 if (nextnode->increment_promise()) {
144 /* The next node will try to satisfy a different set of promises. */
145 tid = next->get_tid();
146 node_stack->pop_restofstack(2);
147 } else if (nextnode->increment_read_from()) {
148 /* The next node will read from a different value. */
149 tid = next->get_tid();
150 node_stack->pop_restofstack(2);
151 } else if (nextnode->increment_future_value()) {
152 /* The next node will try to read from a different future value. */
153 tid = next->get_tid();
154 node_stack->pop_restofstack(2);
156 /* Make a different thread execute for next step */
157 Node *node = nextnode->get_parent();
158 tid = node->get_next_backtrack();
159 node_stack->pop_restofstack(1);
161 DEBUG("*** Divergence point ***\n");
// Replay: simply follow the recorded thread choice.
164 tid = next->get_tid();
166 DEBUG("*** ModelChecker chose next thread = %d ***\n", tid);
167 ASSERT(tid != THREAD_ID_T_NONE);
168 return thread_map->get(id_to_int(tid));
172 * Queries the model-checker for more executions to explore and, if one
173 * exists, resets the model-checker state to execute a new execution.
175 * @return If there are more executions to explore, return true. Otherwise,
178 bool ModelChecker::next_execution()
// Count feasible complete executions for end-of-run statistics.
183 if (isfinalfeasible())
184 num_feasible_executions++;
186 if (isfinalfeasible() || DBG_ENABLED())
// No remaining backtracking point means the search space is exhausted.
189 if ((diverge = get_next_backtrack()) == NULL)
193 printf("Next execution will diverge at:\n");
197 reset_to_initial_state();
// Find the most recent prior action at the same location that conflicts with
// @a act (for backtracking). What counts as a "conflict" depends on the
// action type: synchronizing accesses, conflicting lock ops, or (for unlock)
// a failed trylock in another thread.
201 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
203 switch (act->get_type()) {
207 /* linear search: from most recent to oldest */
208 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
209 action_list_t::reverse_iterator rit;
210 for (rit = list->rbegin(); rit != list->rend(); rit++) {
211 ModelAction *prev = *rit;
212 if (act->is_synchronizing(prev))
218 case ATOMIC_TRYLOCK: {
219 /* linear search: from most recent to oldest */
220 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
221 action_list_t::reverse_iterator rit;
222 for (rit = list->rbegin(); rit != list->rend(); rit++) {
223 ModelAction *prev = *rit;
224 if (act->is_conflicting_lock(prev))
229 case ATOMIC_UNLOCK: {
230 /* linear search: from most recent to oldest */
231 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
232 action_list_t::reverse_iterator rit;
233 for (rit = list->rbegin(); rit != list->rend(); rit++) {
234 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock attempt.
235 if (!act->same_thread(prev)&&prev->is_failed_trylock())
246 /** This method find backtracking points where we should try to
247 * reorder the parameter ModelAction against.
249 * @param the ModelAction to find backtracking points for.
253 void ModelChecker::set_backtracking(ModelAction *act)
255 Thread *t = get_thread(act);
256 ModelAction * prev = get_last_conflict(act);
// Backtracking choices hang off the node *before* the conflicting action.
260 Node * node = prev->get_node()->get_parent();
// Range of thread IDs to consider as alternative schedules at this node.
262 int low_tid, high_tid;
263 if (node->is_enabled(t)) {
264 low_tid=id_to_int(act->get_tid());
268 high_tid=get_num_threads();
271 for(int i=low_tid;i<high_tid;i++) {
272 thread_id_t tid=int_to_id(i);
// Only threads enabled at that node are valid alternatives.
273 if (!node->is_enabled(tid))
276 /* Check if this has been explored already */
277 if (node->has_been_explored(tid))
280 /* Cache the latest backtracking point */
281 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
282 priv->next_backtrack = prev;
284 /* If this is a new backtracking point, mark the tree */
285 if (!node->set_backtrack(tid))
287 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
288 prev->get_tid(), t->get_id());
297 * Returns last backtracking point. The model checker will explore a different
298 * path for this point in the next execution.
299 * @return The ModelAction at which the next execution should diverge.
301 ModelAction * ModelChecker::get_next_backtrack()
// Consume the cached backtracking point (one-shot: cleared on read).
303 ModelAction *next = priv->next_backtrack;
304 priv->next_backtrack = NULL;
309 * Processes a read or rmw model action.
310 * @param curr is the read model action to process.
311 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
312 * @return True if processing this read updates the mo_graph.
314 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
317 bool updated = false;
319 const ModelAction *reads_from = curr->get_node()->get_read_from();
320 if (reads_from != NULL) {
// Stage mo_graph edges so they can be rolled back if this choice turns out
// to make the execution infeasible.
321 mo_graph->startChanges();
323 value = reads_from->get_value();
324 bool r_status = false;
326 if (!second_part_of_rmw) {
328 r_status = r_modification_order(curr, reads_from);
// If reading here is infeasible and another read-from / future-value choice
// remains, roll back and let the caller retry with the next choice.
332 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
333 mo_graph->rollbackChanges();
334 too_many_reads = false;
338 curr->read_from(reads_from);
339 mo_graph->commitChanges();
341 } else if (!second_part_of_rmw) {
342 /* Read from future value */
// Speculative read: record a Promise that some future write must fulfill.
343 value = curr->get_node()->get_future_value();
344 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
345 curr->read_from(NULL);
346 Promise *valuepromise = new Promise(curr, value, expiration);
347 promises->push_back(valuepromise);
// The value read becomes the user-visible return value of the atomic load.
349 get_thread(curr)->set_return_value(value);
355 * Processes a lock, trylock, or unlock model action. @param curr is
356 * the read model action to process.
358 * The try lock operation checks whether the lock is taken. If not,
359 * it falls to the normal lock operation case. If so, it returns
362 * The lock operation has already been checked that it is enabled, so
363 * it just grabs the lock and synchronizes with the previous unlock.
365 * The unlock operation has to re-enable all of the threads that are
366 * waiting on the lock.
369 void ModelChecker::process_mutex(ModelAction *curr) {
370 std::mutex * mutex=(std::mutex *) curr->get_location();
371 struct std::mutex_state * state=mutex->get_state();
372 switch(curr->get_type()) {
373 case ATOMIC_TRYLOCK: {
// trylock succeeds iff the mutex is currently unlocked.
374 bool success=!state->islocked;
375 curr->set_try_lock(success);
377 get_thread(curr)->set_return_value(0);
380 get_thread(curr)->set_return_value(1);
382 //otherwise fall into the lock case
// Detect use of the mutex before its initialization (clock comparison
// against the allocating thread's clock at allocation time).
384 if (curr->get_cv()->getClock(state->alloc_tid)<=state->alloc_clock) {
385 printf("Lock access before initialization\n");
388 state->islocked=true;
389 ModelAction *unlock=get_last_unlock(curr);
390 //synchronize with the previous unlock statement
391 if ( unlock != NULL )
392 curr->synchronize_with(unlock);
395 case ATOMIC_UNLOCK: {
397 state->islocked=false;
398 //wake up the other threads
399 action_list_t * waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
400 //activate all the waiting threads
401 for(action_list_t::iterator rit = waiters->begin(); rit!=waiters->end(); rit++) {
402 scheduler->add_thread(get_thread((*rit)->get_tid()));
414 * Process a write ModelAction
415 * @param curr The ModelAction to process
416 * @return True if the mo_graph was updated or promises were resolved
418 bool ModelChecker::process_write(ModelAction *curr)
420 bool updated_mod_order = w_modification_order(curr);
421 bool updated_promises = resolve_promises(curr);
// Only flush pending future values once no promises remain outstanding.
423 if (promises->size() == 0) {
424 for (unsigned int i = 0; i<futurevalues->size(); i++) {
425 struct PendingFutureValue pfv = (*futurevalues)[i];
// Adding a future value may create a new (later) backtracking point.
426 if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) &&
427 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
428 priv->next_backtrack = pfv.act;
430 futurevalues->resize(0);
433 mo_graph->commitChanges();
434 get_thread(curr)->set_return_value(VALUE_NONE);
435 return updated_mod_order || updated_promises;
439 * Initialize the current action by performing one or more of the following
440 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
441 * in the NodeStack, manipulating backtracking sets, allocating and
442 * initializing clock vectors, and computing the promises to fulfill.
444 * @param curr The current action, as passed from the user context; may be
445 * freed/invalidated after the execution of this function
446 * @return The current action, as processed by the ModelChecker. Is only the
447 * same as the parameter @a curr if this is a newly-explored action.
449 ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr)
451 ModelAction *newcurr;
// RMW second half: merge with the earlier RMWR rather than exploring a node.
453 if (curr->is_rmwc() || curr->is_rmw()) {
454 newcurr = process_rmw(curr);
456 compute_promises(newcurr);
460 newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
462 /* First restore type and order in case of RMW operation */
464 newcurr->copy_typeandorder(curr);
466 ASSERT(curr->get_location()==newcurr->get_location());
467 newcurr->copy_from_new(curr);
469 /* Discard duplicate ModelAction; use action from NodeStack */
472 /* If we have diverged, we need to reset the clock vector. */
474 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
478 * Perform one-time actions when pushing new ModelAction onto
481 curr->create_cv(get_parent_action(curr->get_tid()));
482 if (curr->is_write())
483 compute_promises(curr);
489 * This method checks whether a model action is enabled at the given point.
490 * At this point, it checks whether a lock operation would be successful at this point.
491 * If not, it puts the thread in a waiter list.
492 * @param curr is the ModelAction to check whether it is enabled.
493 * @return a bool that indicates whether the action is enabled.
496 bool ModelChecker::check_action_enabled(ModelAction *curr) {
497 if (curr->is_lock()) {
498 std::mutex * lock=(std::mutex *) curr->get_location();
499 struct std::mutex_state * state = lock->get_state();
500 if (state->islocked) {
501 //Stick the action in the appropriate waiting queue
// The thread will be re-enabled by the corresponding ATOMIC_UNLOCK
// (see process_mutex), which drains this waiter list.
502 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
511 * This is the heart of the model checker routine. It performs model-checking
512 * actions corresponding to a given "current action." Among other processes, it
513 * calculates reads-from relationships, updates synchronization clock vectors,
514 * forms a memory_order constraints graph, and handles replay/backtrack
515 * execution when running permutations of previously-observed executions.
517 * @param curr The current action to process
518 * @return The next Thread that must be executed. May be NULL if ModelChecker
519 * makes no choice (e.g., according to replay execution, combining RMW actions,
522 Thread * ModelChecker::check_current_action(ModelAction *curr)
526 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
// A disabled action (e.g., lock on a held mutex) parks the thread; it is
// re-issued later when the lock becomes available.
528 if (!check_action_enabled(curr)) {
529 //we'll make the execution look like we chose to run this action
530 //much later...when a lock is actually available to release
531 get_current_thread()->set_pending(curr);
532 remove_thread(get_current_thread());
533 return get_next_thread(NULL);
536 ModelAction *newcurr = initialize_curr_action(curr);
538 /* Add the action to lists before any other model-checking tasks */
539 if (!second_part_of_rmw)
540 add_action_to_lists(newcurr);
542 /* Build may_read_from set for newly-created actions */
543 if (curr == newcurr && curr->is_read())
544 build_reads_from_past(curr);
547 /* Thread specific actions */
548 switch (curr->get_type()) {
549 case THREAD_CREATE: {
550 Thread *th = (Thread *)curr->get_location();
551 th->set_creation(curr);
// THREAD_JOIN: block until the joined thread finishes, or complete now.
555 Thread *waiting, *blocking;
556 waiting = get_thread(curr);
557 blocking = (Thread *)curr->get_location();
558 if (!blocking->is_complete()) {
559 blocking->push_wait_list(curr);
560 scheduler->sleep(waiting);
562 do_complete_join(curr);
566 case THREAD_FINISH: {
// Wake every thread that was blocked joining on this one.
567 Thread *th = get_thread(curr);
568 while (!th->wait_list_empty()) {
569 ModelAction *act = th->pop_wait_list();
570 Thread *wake = get_thread(act);
571 scheduler->wake(wake);
572 do_complete_join(act);
578 check_promises(NULL, curr->get_cv());
// Fixed-point work queue: processing one action can enqueue follow-up
// release-sequence and mo-edge re-checks until nothing changes.
585 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
587 while (!work_queue.empty()) {
588 WorkQueueEntry work = work_queue.front();
589 work_queue.pop_front();
592 case WORK_CHECK_CURR_ACTION: {
593 ModelAction *act = work.action;
594 bool updated = false;
595 if (act->is_read() && process_read(act, second_part_of_rmw))
598 if (act->is_write() && process_write(act))
601 if (act->is_mutex_op())
605 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
608 case WORK_CHECK_RELEASE_SEQ:
609 resolve_release_sequences(work.location, &work_queue);
611 case WORK_CHECK_MO_EDGES: {
612 /** @todo Complete verification of work_queue */
613 ModelAction *act = work.action;
614 bool updated = false;
616 if (act->is_read()) {
617 if (r_modification_order(act, act->get_reads_from()))
620 if (act->is_write()) {
621 if (w_modification_order(act))
626 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
635 check_curr_backtracking(curr);
637 set_backtracking(curr);
639 return get_next_thread(curr);
643 * Complete a THREAD_JOIN operation, by synchronizing with the THREAD_FINISH
644 * operation from the Thread it is joining with. Must be called after the
645 * completion of the Thread in question.
646 * @param join The THREAD_JOIN action
648 void ModelChecker::do_complete_join(ModelAction *join)
650 Thread *blocking = (Thread *)join->get_location();
// The last action of a completed thread is its THREAD_FINISH.
651 ModelAction *act = get_last_action(blocking->get_id());
652 join->synchronize_with(act);
// If the current action's node still has unexplored alternatives (thread
// interleavings, read-from values, future values, or promise assignments),
// cache it as the latest candidate backtracking point.
655 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
656 Node *currnode = curr->get_node();
657 Node *parnode = currnode->get_parent();
659 if ((!parnode->backtrack_empty() ||
660 !currnode->read_from_empty() ||
661 !currnode->future_value_empty() ||
662 !currnode->promise_empty())
663 && (!priv->next_backtrack ||
664 *curr > *priv->next_backtrack)) {
665 priv->next_backtrack = curr;
// @return true if any outstanding promise's expiration clock has already
// passed the current sequence-number counter.
669 bool ModelChecker::promises_expired() {
670 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
671 Promise *promise = (*promises)[promise_index];
672 if (promise->get_expiration()<priv->used_sequence_numbers) {
679 /** @return whether the current partial trace must be a prefix of a
// Feasible prefix = no unresolved promises and no pending lazy
// release-sequence synchronizations.
681 bool ModelChecker::isfeasibleprefix() {
682 return promises->size() == 0 && *lazy_sync_size == 0;
685 /** @return whether the current partial trace is feasible. */
686 bool ModelChecker::isfeasible() {
// Full feasibility additionally requires no RMW-atomicity violation.
687 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
690 /** @return whether the current partial trace is feasible other than
691 * multiple RMW reading from the same store. */
692 bool ModelChecker::isfeasibleotherthanRMW() {
// The DEBUG calls only report *why* the trace is infeasible; the actual
// verdict is the combined condition at the end.
694 if (mo_graph->checkForCycles())
695 DEBUG("Infeasible: modification order cycles\n");
697 DEBUG("Infeasible: failed promise\n");
699 DEBUG("Infeasible: too many reads\n");
700 if (promises_expired())
701 DEBUG("Infeasible: promises expired\n");
703 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !promises_expired();
706 /** Returns whether the current completed trace is feasible. */
707 bool ModelChecker::isfinalfeasible() {
// A complete execution is feasible only if the partial-trace checks pass
// AND every promise was resolved by some write.
// Fix: debug message typo "unrevolved" -> "unresolved".
708 if (DBG_ENABLED() && promises->size() != 0)
709 DEBUG("Infeasible: unresolved promises\n");
711 return isfeasible() && promises->size() == 0;
714 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
715 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
716 int tid = id_to_int(act->get_tid());
// The previous action in this thread is the RMWR half to merge with.
717 ModelAction *lastread = get_last_action(tid);
718 lastread->process_rmw(act);
// A completed RMW must be immediately mo-after the write it read from.
719 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
720 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
721 mo_graph->commitChanges();
727 * Checks whether a thread has read from the same write for too many times
728 * without seeing the effects of a later write.
731 * 1) there must a different write that we could read from that would satisfy the modification order,
732 * 2) we must have read from the same value in excess of maxreads times, and
733 * 3) that other write must have been in the reads_from set for maxreads times.
735 * If so, we decide that the execution is no longer feasible.
737 void ModelChecker::check_recency(ModelAction *curr) {
// Only active when the user set a maxreads bound.
738 if (params.maxreads != 0) {
739 if (curr->get_node()->get_read_from_size() <= 1)
742 //Must make sure that execution is currently feasible... We could
743 //accidentally clear by rolling back
747 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
748 int tid = id_to_int(curr->get_tid());
751 if ((int)thrd_lists->size() <= tid)
754 action_list_t *list = &(*thrd_lists)[tid];
756 action_list_t::reverse_iterator rit = list->rbegin();
// Position the iterator at curr within this thread's per-location list.
758 for (; (*rit) != curr; rit++)
760 /* go past curr now */
763 action_list_t::reverse_iterator ritcopy = rit;
764 //See if we have enough reads from the same value
766 for (; count < params.maxreads; rit++,count++) {
767 if (rit==list->rend())
769 ModelAction *act = *rit;
772 if (act->get_reads_from() != curr->get_reads_from())
774 if (act->get_node()->get_read_from_size() <= 1)
// For each alternative write in curr's may-read-from set...
778 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
780 const ModelAction * write = curr->get_node()->get_read_from_at(i);
781 //Need a different write
782 if (write==curr->get_reads_from())
785 /* Test to see whether this is a feasible write to read from*/
786 mo_graph->startChanges();
787 r_modification_order(curr, write);
788 bool feasiblereadfrom = isfeasible();
789 mo_graph->rollbackChanges();
791 if (!feasiblereadfrom)
795 bool feasiblewrite = true;
796 //now we need to see if this write works for everyone
798 for (int loop = count; loop>0; loop--,rit++) {
799 ModelAction *act=*rit;
800 bool foundvalue = false;
801 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
// BUGFIX: index with the inner loop variable j, not the outer loop's i.
// The original compared get_read_from_at(i), so this inner search tested
// the wrong element of act's may-read-from set.
802 if (act->get_node()->get_read_from_at(j)==write) {
808 feasiblewrite = false;
813 too_many_reads = true;
821 * Updates the mo_graph with the constraints imposed from the current
824 * Basic idea is the following: Go through each other thread and find
825 * the latest action that happened before our read. Two cases:
827 * (1) The action is a write => that write must either occur before
828 * the write we read from or be the write we read from.
830 * (2) The action is a read => the write that that action read from
831 * must occur before the write we read from or be the same write.
833 * @param curr The current action. Must be a read.
834 * @param rf The action that curr reads from. Must be a write.
835 * @return True if modification order edges were added; false otherwise
837 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
839 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
842 ASSERT(curr->is_read());
844 /* Iterate over all threads */
845 for (i = 0; i < thrd_lists->size(); i++) {
846 /* Iterate over actions in thread, starting from most recent */
847 action_list_t *list = &(*thrd_lists)[i];
848 action_list_t::reverse_iterator rit;
849 for (rit = list->rbegin(); rit != list->rend(); rit++) {
850 ModelAction *act = *rit;
853 * Include at most one act per-thread that "happens
854 * before" curr. Don't consider reflexively.
856 if (act->happens_before(curr) && act != curr) {
857 if (act->is_write()) {
// Case (1): prior write must be mo-before the write we read from.
859 mo_graph->addEdge(act, rf);
// Case (2): the write a prior read saw must be mo-before rf.
863 const ModelAction *prevreadfrom = act->get_reads_from();
864 if (prevreadfrom != NULL && rf != prevreadfrom) {
865 mo_graph->addEdge(prevreadfrom, rf);
877 /** This method fixes up the modification order when we resolve a
878 * promises. The basic problem is that actions that occur after the
879 * read curr could not properly add items to the modification order
882 * So for each thread, we find the earliest item that happens after
883 * the read curr. This is the item we have to fix up with additional
884 * constraints. If that action is write, we add a MO edge between
885 * the Action rf and that action. If the action is a read, we add a
886 * MO edge between the Action rf, and whatever the read accessed.
888 * @param curr is the read ModelAction that we are fixing up MO edges for.
889 * @param rf is the write ModelAction that curr reads from.
893 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
895 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
897 ASSERT(curr->is_read());
899 /* Iterate over all threads */
900 for (i = 0; i < thrd_lists->size(); i++) {
901 /* Iterate over actions in thread, starting from most recent */
902 action_list_t *list = &(*thrd_lists)[i];
903 action_list_t::reverse_iterator rit;
904 ModelAction *lastact = NULL;
906 /* Find last action that happens after curr */
// Reverse iteration means the last match found is the *earliest* action
// in this thread that happens-after curr.
907 for (rit = list->rbegin(); rit != list->rend(); rit++) {
908 ModelAction *act = *rit;
909 if (curr->happens_before(act)) {
915 /* Include at most one act per-thread that "happens before" curr */
916 if (lastact != NULL) {
917 if (lastact->is_read()) {
918 const ModelAction *postreadfrom = lastact->get_reads_from();
919 if (postreadfrom != NULL&&rf != postreadfrom)
920 mo_graph->addEdge(rf, postreadfrom);
921 } else if (rf != lastact) {
922 mo_graph->addEdge(rf, lastact);
930 * Updates the mo_graph with the constraints imposed from the current write.
932 * Basic idea is the following: Go through each other thread and find
933 * the latest action that happened before our write. Two cases:
935 * (1) The action is a write => that write must occur before
938 * (2) The action is a read => the write that that action read from
939 * must occur before the current write.
941 * This method also handles two other issues:
943 * (I) Sequential Consistency: Making sure that if the current write is
944 * seq_cst, that it occurs after the previous seq_cst write.
946 * (II) Sending the write back to non-synchronizing reads.
948 * @param curr The current action. Must be a write.
949 * @return True if modification order edges were added; false otherwise
951 bool ModelChecker::w_modification_order(ModelAction *curr)
953 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
956 ASSERT(curr->is_write());
958 if (curr->is_seqcst()) {
959 /* We have to at least see the last sequentially consistent write,
960 so we are initialized. */
// (I): seq_cst writes to the same location are totally ordered.
961 ModelAction *last_seq_cst = get_last_seq_cst(curr);
962 if (last_seq_cst != NULL) {
963 mo_graph->addEdge(last_seq_cst, curr);
968 /* Iterate over all threads */
969 for (i = 0; i < thrd_lists->size(); i++) {
970 /* Iterate over actions in thread, starting from most recent */
971 action_list_t *list = &(*thrd_lists)[i];
972 action_list_t::reverse_iterator rit;
973 for (rit = list->rbegin(); rit != list->rend(); rit++) {
974 ModelAction *act = *rit;
977 * If RMW, we already have all relevant edges,
978 * so just skip to next thread.
979 * If normal write, we need to look at earlier
980 * actions, so continue processing list.
989 * Include at most one act per-thread that "happens
992 if (act->happens_before(curr)) {
994 * Note: if act is RMW, just add edge:
996 * The following edge should be handled elsewhere:
997 * readfrom(act) --mo--> act
1000 mo_graph->addEdge(act, curr);
1001 else if (act->is_read() && act->get_reads_from() != NULL)
1002 mo_graph->addEdge(act->get_reads_from(), curr);
1005 } else if (act->is_read() && !act->is_synchronizing(curr) &&
1006 !act->same_thread(curr)) {
1007 /* We have an action that:
1008 (1) did not happen before us
1009 (2) is a read and we are a write
1010 (3) cannot synchronize with us
1011 (4) is in a different thread
1013 that read could potentially read from our write.
// (II): offer this write's value to that read as a "future value",
// subject to the thin-air constraint check.
1015 if (thin_air_constraint_may_allow(curr, act)) {
1017 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from()==act->get_reads_from() && isfeasibleotherthanRMW())) {
1018 struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act};
1019 futurevalues->push_back(pfv);
1029 /** Arbitrary reads from the future are not allowed. Section 29.3
1030 * part 9 places some constraints. This method checks one result of constraint
1031 * constraint. Others require compiler support. */
1033 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
// Only RMW-to-RMW future reads are restricted here; anything else passes.
1034 if (!writer->is_rmw())
1037 if (!reader->is_rmw())
// Walk the writer's reads-from chain; if it passes through a same-thread
// action that happens-before the reader, the future read would be circular.
1040 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1043 if (search->get_tid() == reader->get_tid() &&
1044 search->happens_before(reader))
1052 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1053 * The ModelAction under consideration is expected to be taking part in
1054 * release/acquire synchronization as an object of the "reads from" relation.
1055 * Note that this can only provide release sequence support for RMW chains
1056 * which do not read from the future, as those actions cannot be traced until
1057 * their "promise" is fulfilled. Similarly, we may not even establish the
1058 * presence of a release sequence with certainty, as some modification order
1059 * constraints may be decided further in the future. Thus, this function
1060 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1061 * and a boolean representing certainty.
1063 * @todo Finish lazy updating, when promises are fulfilled in the future
1064 * @param rf The action that might be part of a release sequence. Must be a
1066 * @param release_heads A pass-by-reference style return parameter. After
1067 * execution of this function, release_heads will contain the heads of all the
1068 * relevant release sequences, if any exists
1069 * @return true, if the ModelChecker is certain that release_heads is complete;
1072 bool ModelChecker::release_seq_head(const ModelAction *rf, rel_heads_list_t *release_heads) const
1075 /* read from future: need to settle this later */
1076 return false; /* incomplete */
1079 ASSERT(rf->is_write());
1081 if (rf->is_release())
1082 release_heads->push_back(rf);
1084 /* We need a RMW action that is both an acquire and release to stop */
1085 /** @todo Need to be smarter here... In the linux lock
1086 * example, this will run to the beginning of the program for
// Recurse backwards through an RMW chain until a release (or acq+rel) stops it.
1088 if (rf->is_acquire() && rf->is_release())
1089 return true; /* complete */
1090 return release_seq_head(rf->get_reads_from(), release_heads);
1092 if (rf->is_release())
1093 return true; /* complete */
1095 /* else relaxed write; check modification order for contiguous subsequence
1096 * -> rf must be same thread as release */
1097 int tid = id_to_int(rf->get_tid());
1098 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
1099 action_list_t *list = &(*thrd_lists)[tid];
1100 action_list_t::const_reverse_iterator rit;
1102 /* Find rf in the thread list */
1103 rit = std::find(list->rbegin(), list->rend(), rf);
1104 ASSERT(rit != list->rend());
1106 /* Find the last write/release */
1107 for (; rit != list->rend(); rit++)
1108 if ((*rit)->is_release())
1110 if (rit == list->rend()) {
1111 /* No write-release in this thread */
1112 return true; /* complete */
1114 ModelAction *release = *rit;
1116 ASSERT(rf->same_thread(release));
// For every *other* thread, check whether any of its writes could slot into
// the modification order between 'release' and 'rf', breaking contiguity.
1118 bool certain = true;
1119 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1120 if (id_to_int(rf->get_tid()) == (int)i)
1122 list = &(*thrd_lists)[i];
1124 /* Can we ensure no future writes from this thread may break
1125 * the release seq? */
1126 bool future_ordered = false;
1128 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1129 const ModelAction *act = *rit;
1130 if (!act->is_write())
1132 /* Reach synchronization -> this thread is complete */
1133 if (act->happens_before(release))
1135 if (rf->happens_before(act)) {
1136 future_ordered = true;
1140 /* Check modification order */
1141 if (mo_graph->checkReachable(rf, act)) {
1142 /* rf --mo--> act */
1143 future_ordered = true;
1146 if (mo_graph->checkReachable(act, release))
1147 /* act --mo--> release */
1149 if (mo_graph->checkReachable(release, act) &&
1150 mo_graph->checkReachable(act, rf)) {
1151 /* release --mo-> act --mo--> rf */
// A write provably between release and rf breaks the sequence for good.
1152 return true; /* complete */
1156 if (!future_ordered)
1157 return false; /* This thread is uncertain */
1161 release_heads->push_back(release);
1166 * A public interface for getting the release sequence head(s) with which a
1167 * given ModelAction must synchronize. This function only returns a non-empty
1168 * result when it can locate a release sequence head with certainty. Otherwise,
1169 * it may mark the internal state of the ModelChecker so that it will handle
1170 * the release sequence at a later time, causing @a act to update its
1171 * synchronization at some later point in execution.
1172 * @param act The 'acquire' action that may read from a release sequence
1173 * @param release_heads A pass-by-reference return parameter. Will be filled
1174 * with the head(s) of the release sequence(s), if they exists with certainty.
1175 * @see ModelChecker::release_seq_head
1177 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1179 const ModelAction *rf = act->get_reads_from();
1181 complete = release_seq_head(rf, release_heads);
1183 /* add act to 'lazy checking' list */
// Uncertain result: defer to resolve_release_sequences(), keyed by location.
1184 action_list_t *list;
1185 list = lazy_sync_with_release->get_safe_ptr(act->get_location());
1186 list->push_back(act);
// Counter lives in the snapshotted region (see constructor aliasing).
1187 (*lazy_sync_size)++;
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions
 * @param work_queue The work queue to which to add work items as they are
 * discovered (re-checks of actions whose synchronization changed)
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
	/* Actions previously stashed by get_release_seq_heads() for this object */
	action_list_t *list;
	list = lazy_sync_with_release->getptr(location);

	bool updated = false;
	action_list_t::iterator it = list->begin();
	while (it != list->end()) {
		ModelAction *act = *it;
		const ModelAction *rf = act->get_reads_from();
		rel_heads_list_t release_heads;
		/* Retry the release-sequence computation now that more
		 * modification-order information may be available.
		 * NOTE(review): 'complete' has no visible declaration and the
		 * 'updated' flag is never visibly set/returned — parts of this
		 * function appear truncated in this view; confirm against the
		 * full source. */
		complete = release_seq_head(rf, &release_heads);
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!act->has_synchronized_with(release_heads[i])) {
				act->synchronize_with(release_heads[i]);
			/* Re-check act for mo_graph edges */
			work_queue->push_back(MOEdgeWorkEntry(act));

			/* propagate synchronization to later actions */
			action_list_t::reverse_iterator it = action_trace->rbegin();
			while ((*it) != act) {
				ModelAction *propagate = *it;
				if (act->happens_before(propagate)) {
					propagate->synchronize_with(act);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));
		/* Release sequence handled; drop act from the lazy list */
		it = list->erase(it);
		(*lazy_sync_size)--;

	// If we resolved promises or data races, see if we have realized a data race.
	if (checkDataRaces()) {
1257 * Performs various bookkeeping operations for the current ModelAction. For
1258 * instance, adds action to the per-object, per-thread action vector and to the
1259 * action trace list of all thread actions.
1261 * @param act is the ModelAction to add.
1263 void ModelChecker::add_action_to_lists(ModelAction *act)
1265 int tid = id_to_int(act->get_tid());
1266 action_trace->push_back(act);
1268 obj_map->get_safe_ptr(act->get_location())->push_back(act);
1270 std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
1271 if (tid >= (int)vec->size())
1272 vec->resize(priv->next_thread_id);
1273 (*vec)[tid].push_back(act);
1275 if ((int)thrd_last_action->size() <= tid)
1276 thrd_last_action->resize(get_num_threads());
1277 (*thrd_last_action)[tid] = act;
1280 ModelAction * ModelChecker::get_last_action(thread_id_t tid)
1282 int threadid=id_to_int(tid);
1283 if (threadid<(int)thrd_last_action->size())
1284 return (*thrd_last_action)[id_to_int(tid)];
1290 * Gets the last memory_order_seq_cst write (in the total global sequence)
1291 * performed on a particular object (i.e., memory location), not including the
1293 * @param curr The current ModelAction; also denotes the object location to
1295 * @return The last seq_cst write
1297 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr)
1299 void *location = curr->get_location();
1300 action_list_t *list = obj_map->get_safe_ptr(location);
1301 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1302 action_list_t::reverse_iterator rit;
1303 for (rit = list->rbegin(); rit != list->rend(); rit++)
1304 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1310 * Gets the last unlock operation
1311 * performed on a particular mutex (i.e., memory location).
1312 * @param curr The current ModelAction; also denotes the object location to
1314 * @return The last unlock operation
1317 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr)
1319 void *location = curr->get_location();
1320 action_list_t *list = obj_map->get_safe_ptr(location);
1321 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1322 action_list_t::reverse_iterator rit;
1323 for (rit = list->rbegin(); rit != list->rend(); rit++)
1324 if ((*rit)->is_unlock())
1329 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1331 ModelAction *parent = get_last_action(tid);
1333 parent = get_thread(tid)->get_creation();
1338 * Returns the clock vector for a given thread.
1339 * @param tid The thread whose clock vector we want
1340 * @return Desired clock vector
1342 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1344 return get_parent_action(tid)->get_cv();
/**
 * Resolve a set of Promises with a current write. The set is provided in the
 * Node corresponding to @a write.
 * @param write The ModelAction that is fulfilling Promises
 * @return True if promises were resolved; false otherwise
 */
bool ModelChecker::resolve_promises(ModelAction *write)
	bool resolved = false;

	/* 'i' indexes the Node's promise set; 'promise_index' tracks the live
	 * promises vector, which shrinks when a promise is erased */
	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			/* This write satisfies the promised future value */
			ModelAction *read = promise->get_action();
			read->read_from(write);
			if (read->is_rmw()) {
				mo_graph->addRMWEdge(write, read);
			//First fix up the modification order for actions that happened
			//before the read
			r_modification_order(read, write);
			//Next fix up the modification order for actions that happened
			//after the read
			post_r_modification_order(read, write);
			/* Promise fulfilled; remove it from the pending set */
			promises->erase(promises->begin() + promise_index);
/**
 * Compute the set of promises that could potentially be satisfied by this
 * action. Note that the set computation actually appears in the Node, not in
 * the ModelAction itself.
 * @param curr The ModelAction that may satisfy promises
 */
void ModelChecker::compute_promises(ModelAction *curr)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* curr can feed a promised read only if it is not already ordered
		 * with/synchronizing with the read, comes from a different thread,
		 * and writes the promised value */
		if (!act->happens_before(curr) &&
				!act->is_synchronizing(curr) &&
				!act->same_thread(curr) &&
				promise->get_value() == curr->get_value()) {
			/* Mark promise i as satisfiable in curr's Node */
			curr->get_node()->set_promise(i);
/**
 * Checks promises in response to change in ClockVector Threads. When a
 * thread's clock vector comes to include (synchronize with) a promised-future
 * read, that thread can no longer send a value back to satisfy the promise;
 * once every thread is in that state, the promise has failed.
 * @param old_cv The thread's clock vector before the merge (may be NULL)
 * @param merge_cv The thread's clock vector after the merge
 */
void ModelChecker::check_promises(ClockVector *old_cv, ClockVector *merge_cv)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* Only count threads that newly synchronized with the promised read */
		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
				merge_cv->synchronized_since(act)) {
			//This thread is no longer able to send values back to satisfy the promise
			int num_synchronized_threads = promise->increment_threads();
			if (num_synchronized_threads == get_num_threads()) {
				//Promise has failed
				failed_promise = true;
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from. This set is determined by the clock vector's "happens before"
 * relation.
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelChecker::build_reads_from_past(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
	ASSERT(curr->is_read());

	/* Most recent seq_cst write to this location, if any */
	ModelAction *last_seq_cst = NULL;

	/* Track whether this object has been initialized */
	bool initialized = false;

	if (curr->is_seqcst()) {
		last_seq_cst = get_last_seq_cst(curr);
		/* We have to at least see the last sequentially consistent write,
		so we are initialized. */
		if (last_seq_cst != NULL)
			/* NOTE(review): the statement setting 'initialized' here (and
			 * the declaration of loop index 'i' below) appear truncated in
			 * this view; confirm against the full source */

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			if (!curr->is_seqcst()|| (!act->is_seqcst() && (last_seq_cst==NULL||!act->happens_before(last_seq_cst))) || act == last_seq_cst) {
				DEBUG("Adding action to may_read_from:\n");
				if (DBG_ENABLED()) {
				/* Record act as a candidate write for this read */
				curr->get_node()->add_read_from(act);

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr)) {

	/** @todo Need a more informative way of reporting errors. */
	printf("ERROR: may read from uninitialized atomic\n");

	/* Dump diagnostics when debugging or when the read saw no writes */
	if (DBG_ENABLED() || !initialized) {
		printf("Reached read action:\n");
		printf("Printing may_read_from\n");
		curr->get_node()->print_may_read_from();
		printf("End printing may_read_from\n");

	ASSERT(initialized);
/** @brief Print a list of ModelActions, bracketed by horizontal rules */
static void print_list(action_list_t *list)
	action_list_t::iterator it;

	printf("---------------------------------------------------------------------\n");

	for (it = list->begin(); it != list->end(); it++) {
		/* NOTE(review): the per-action print call inside this loop appears
		 * truncated in this view; confirm against the full source */
	printf("---------------------------------------------------------------------\n");
/** @brief Print execution statistics and the full action trace */
void ModelChecker::print_summary()
	printf("Number of executions: %d\n", num_executions);
	printf("Number of feasible executions: %d\n", num_feasible_executions);
	printf("Total nodes created: %d\n", node_stack->get_total_nodes());

#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the modification-order graph for this execution to a file named
	 * after the execution number */
	char buffername[100];
	sprintf(buffername, "exec%u",num_executions);
	mo_graph->dumpGraphToFile(buffername);

	if (!isfinalfeasible())
		printf("INFEASIBLE EXECUTION!\n");
	print_list(action_trace);
1525 * Add a Thread to the system for the first time. Should only be called once
1527 * @param t The Thread to add
1529 void ModelChecker::add_thread(Thread *t)
1531 thread_map->put(id_to_int(t->get_id()), t);
1532 scheduler->add_thread(t);
1536 * Removes a thread from the scheduler.
1537 * @param the thread to remove.
1540 void ModelChecker::remove_thread(Thread *t)
1542 scheduler->remove_thread(t);
1546 * Switch from a user-context to the "master thread" context (a.k.a. system
1547 * context). This switch is made with the intention of exploring a particular
1548 * model-checking action (described by a ModelAction object). Must be called
1549 * from a user-thread context.
1550 * @param act The current action that will be explored. Must not be NULL.
1551 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
1553 int ModelChecker::switch_to_master(ModelAction *act)
1556 Thread *old = thread_current();
1557 set_current_action(act);
1558 old->set_state(THREAD_READY);
1559 return Thread::swap(old, &system_context);
/**
 * Takes the next step in the execution, if possible.
 * @return Returns true (success) if a step was taken and false otherwise.
 */
bool ModelChecker::take_step() {
	Thread * curr = thread_current();
	if (curr->get_state() == THREAD_READY) {
		/* The current thread posted an action; process it and learn which
		 * thread the model checker wants to run next */
		ASSERT(priv->current_action);
		priv->nextThread = check_current_action(priv->current_action);
		priv->current_action = NULL;
		/* Threads that can make no further progress leave the scheduler */
		if (curr->is_blocked() || curr->is_complete())
			scheduler->remove_thread(curr);

	Thread * next = scheduler->next_thread(priv->nextThread);

	/* Infeasible -> don't take any more steps */
	/* NOTE(review): the infeasibility and next == NULL early-return guards
	 * appear truncated in this view — 'next' is dereferenced below; confirm
	 * against the full source */

	next->set_state(THREAD_RUNNING);
	DEBUG("(%d, %d)\n", curr ? curr->get_id() : -1, next ? next->get_id() : -1);

	/* next == NULL -> don't take any more steps */

	if ( next->get_pending() != NULL ) {
		//restart a pending action
		set_current_action(next->get_pending());
		next->set_pending(NULL);
		next->set_state(THREAD_READY);

	/* Return false only if swap fails with an error */
	return (Thread::swap(&system_context, next) == 0);
/** Runs the current execution until there are no more steps to take. */
void ModelChecker::finish_execution() {
	/* take_step() returns false once no runnable thread remains */
	while (take_step());