8 #include "snapshot-interface.h"
10 #include "clockvector.h"
11 #include "cyclegraph.h"
16 #define INITIAL_THREAD_ID 0
20 /** @brief Constructor */
21 ModelChecker::ModelChecker(struct model_params params) :
22 /* Initialize default scheduler */
23 scheduler(new Scheduler()),
25 num_feasible_executions(0),
28 action_trace(new action_list_t()),
29 thread_map(new HashTable<int, Thread *, int>()),
30 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
31 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
32 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
33 promises(new std::vector<Promise *>()),
34 futurevalues(new std::vector<struct PendingFutureValue>()),
35 lazy_sync_with_release(new HashTable<void *, action_list_t, uintptr_t, 4>()),
36 thrd_last_action(new std::vector<ModelAction *>(1)),
37 node_stack(new NodeStack()),
38 mo_graph(new CycleGraph()),
39 failed_promise(false),
40 too_many_reads(false),
43 /* Allocate this "size" on the snapshotting heap */
44 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
45 /* First thread created will have id INITIAL_THREAD_ID */
46 priv->next_thread_id = INITIAL_THREAD_ID;
/* Alias into the snapshotted struct so the counter is rolled back with it */
48 lazy_sync_size = &priv->lazy_sync_size;
51 /** @brief Destructor */
52 ModelChecker::~ModelChecker()
/* Destroy every user Thread created during this run */
54 for (int i = 0; i < get_num_threads(); i++)
55 delete thread_map->get(i);
60 delete lock_waiters_map;
/* Unfulfilled Promise objects are owned by the checker; free them */
63 for (unsigned int i = 0; i < promises->size(); i++)
64 delete (*promises)[i];
67 delete lazy_sync_with_release;
69 delete thrd_last_action;
76 * Restores user program to initial state and resets all model-checker data
79 void ModelChecker::reset_to_initial_state()
81 DEBUG("+++ Resetting to initial state +++\n");
82 node_stack->reset_execution();
/* Clear per-execution infeasibility flags before replaying */
83 failed_promise = false;
84 too_many_reads = false;
/* Roll the snapshotting heap (incl. priv) back to step 0 */
86 snapshotObject->backTrackBeforeStep(0);
89 /** @return a thread ID for a new Thread */
90 thread_id_t ModelChecker::get_next_id()
/* Post-increment: IDs are handed out sequentially from INITIAL_THREAD_ID */
92 return priv->next_thread_id++;
95 /** @return the number of user threads created during this execution */
96 int ModelChecker::get_num_threads()
/* next_thread_id doubles as a count since IDs start at 0 and are sequential */
98 return priv->next_thread_id;
101 /** @return a sequence number for a new ModelAction */
102 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: sequence numbers begin at 1; lives in snapshotted priv */
104 return ++priv->used_sequence_numbers;
108 * @brief Choose the next thread to execute.
110 * This function chooses the next thread that should execute. It can force the
111 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
112 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
113 * The model-checker may have no preference regarding the next thread (i.e.,
114 * when exploring a new execution ordering), in which case this will return
116 * @param curr The current ModelAction. This action might guide the choice of
118 * @return The next thread to run. If the model-checker has no preference, NULL.
120 Thread * ModelChecker::get_next_thread(ModelAction *curr)
125 /* Do not split atomic actions. */
127 return thread_current();
128 /* The THREAD_CREATE action points to the created Thread */
129 else if (curr->get_type() == THREAD_CREATE)
130 return (Thread *)curr->get_location();
133 /* Have we completed exploring the preselected path? */
137 /* Else, we are trying to replay an execution */
138 ModelAction *next = node_stack->get_next()->get_action();
/* At the divergence point, pick the next unexplored alternative for this node:
 * promises first, then read-from set, then future values, then a backtracked
 * thread choice at the parent node. */
140 if (next == diverge) {
141 Node *nextnode = next->get_node();
142 /* Reached divergence point */
143 if (nextnode->increment_promise()) {
144 /* The next node will try to satisfy a different set of promises. */
145 tid = next->get_tid();
146 node_stack->pop_restofstack(2);
147 } else if (nextnode->increment_read_from()) {
148 /* The next node will read from a different value. */
149 tid = next->get_tid();
150 node_stack->pop_restofstack(2);
151 } else if (nextnode->increment_future_value()) {
152 /* The next node will try to read from a different future value. */
153 tid = next->get_tid();
154 node_stack->pop_restofstack(2);
156 /* Make a different thread execute for next step */
157 Node *node = nextnode->get_parent();
158 tid = node->get_next_backtrack();
159 node_stack->pop_restofstack(1);
161 DEBUG("*** Divergence point ***\n");
/* Not yet diverged: replay the recorded thread choice */
164 tid = next->get_tid();
166 DEBUG("*** ModelChecker chose next thread = %d ***\n", tid);
167 ASSERT(tid != THREAD_ID_T_NONE);
168 return thread_map->get(id_to_int(tid));
172 * Queries the model-checker for more executions to explore and, if one
173 * exists, resets the model-checker state to execute a new execution.
175 * @return If there are more executions to explore, return true. Otherwise,
178 bool ModelChecker::next_execution()
183 if (isfinalfeasible())
184 num_feasible_executions++;
/* Only print traces for feasible executions (or always, when debugging) */
186 if (isfinalfeasible() || DBG_ENABLED())
/* No backtracking point left => exploration is exhausted */
189 if ((diverge = get_next_backtrack()) == NULL)
193 printf("Next execution will diverge at:\n");
197 reset_to_initial_state();
/** @brief Find the most recent action that conflicts with @a act.
 *  Searches the per-location action list newest-to-oldest; the notion of
 *  "conflict" depends on the action type (synchronization for atomics,
 *  lock conflicts for trylock, failed trylocks from other threads for
 *  unlock). */
201 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
203 switch (act->get_type()) {
207 /* linear search: from most recent to oldest */
208 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
209 action_list_t::reverse_iterator rit;
210 for (rit = list->rbegin(); rit != list->rend(); rit++) {
211 ModelAction *prev = *rit;
212 if (act->is_synchronizing(prev))
218 case ATOMIC_TRYLOCK: {
219 /* linear search: from most recent to oldest */
220 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
221 action_list_t::reverse_iterator rit;
222 for (rit = list->rbegin(); rit != list->rend(); rit++) {
223 ModelAction *prev = *rit;
224 if (act->is_conflicting_lock(prev))
229 case ATOMIC_UNLOCK: {
230 /* linear search: from most recent to oldest */
231 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
232 action_list_t::reverse_iterator rit;
233 for (rit = list->rbegin(); rit != list->rend(); rit++) {
234 ModelAction *prev = *rit;
/* An unlock conflicts with another thread's failed trylock */
235 if (!act->same_thread(prev)&&prev->is_failed_trylock())
/** @brief Mark backtracking points implied by the conflicts of @a act.
 *  Finds the last conflicting action and requests that its node explore
 *  alternative thread schedules in future executions. */
246 void ModelChecker::set_backtracking(ModelAction *act)
248 Thread *t = get_thread(act);
249 ModelAction * prev = get_last_conflict(act);
/* Backtracking is recorded at the parent of the conflicting action's node */
253 Node * node = prev->get_node()->get_parent();
255 int low_tid, high_tid;
256 if (node->is_enabled(t)) {
/* Only need to backtrack the single conflicting thread */
257 low_tid = id_to_int(act->get_tid());
258 high_tid = low_tid+1;
261 high_tid = get_num_threads();
264 for(int i = low_tid; i < high_tid; i++) {
265 thread_id_t tid = int_to_id(i);
266 if (!node->is_enabled(tid))
269 /* Check if this has been explored already */
270 if (node->has_been_explored(tid))
273 /* Cache the latest backtracking point */
274 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
275 priv->next_backtrack = prev;
277 /* If this is a new backtracking point, mark the tree */
278 if (!node->set_backtrack(tid))
280 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
281 prev->get_tid(), t->get_id());
290 * Returns last backtracking point. The model checker will explore a different
291 * path for this point in the next execution.
292 * @return The ModelAction at which the next execution should diverge.
294 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: it is cleared so it is used at most once */
296 ModelAction *next = priv->next_backtrack;
297 priv->next_backtrack = NULL;
302 * Processes a read or rmw model action.
303 * @param curr is the read model action to process.
304 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
305 * @return True if processing this read updates the mo_graph.
307 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
310 bool updated = false;
312 const ModelAction *reads_from = curr->get_node()->get_read_from();
313 if (reads_from != NULL) {
/* Stage mo_graph edits so they can be rolled back if infeasible */
314 mo_graph->startChanges();
316 value = reads_from->get_value();
317 bool r_status = false;
319 if (!second_part_of_rmw) {
321 r_status = r_modification_order(curr, reads_from);
/* If this read makes the trace infeasible and an alternative read-from or
 * future value exists, roll back and retry with the alternative */
325 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
326 mo_graph->rollbackChanges();
327 too_many_reads = false;
331 curr->read_from(reads_from);
332 mo_graph->commitChanges();
334 } else if (!second_part_of_rmw) {
335 /* Read from future value */
336 value = curr->get_node()->get_future_value();
337 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
/* No writer yet: record a Promise that some future write must fulfill */
338 curr->read_from(NULL);
339 Promise *valuepromise = new Promise(curr, value, expiration);
340 promises->push_back(valuepromise);
342 get_thread(curr)->set_return_value(value);
348 * Processes a lock, trylock, or unlock model action. @param curr is
349 * the read model action to process.
351 * The try lock operation checks whether the lock is taken. If not,
352 * it falls to the normal lock operation case. If so, it returns
355 * The lock operation has already been checked that it is enabled, so
356 * it just grabs the lock and synchronizes with the previous unlock.
358 * The unlock operation has to re-enable all of the threads that are
359 * waiting on the lock.
361 void ModelChecker::process_mutex(ModelAction *curr) {
362 std::mutex *mutex = (std::mutex *)curr->get_location();
363 struct std::mutex_state *state = mutex->get_state();
364 switch (curr->get_type()) {
365 case ATOMIC_TRYLOCK: {
366 bool success = !state->islocked;
367 curr->set_try_lock(success);
/* Failed trylock returns 0 to the user thread; success returns 1 */
369 get_thread(curr)->set_return_value(0);
372 get_thread(curr)->set_return_value(1);
374 //otherwise fall into the lock case
/* Detect use of the mutex before its constructor ran, via clock vectors */
376 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
377 printf("Lock access before initialization\n");
380 state->islocked = true;
381 ModelAction *unlock = get_last_unlock(curr);
382 //synchronize with the previous unlock statement
384 curr->synchronize_with(unlock);
387 case ATOMIC_UNLOCK: {
389 state->islocked = false;
390 //wake up the other threads
391 action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
392 //activate all the waiting threads
393 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
394 scheduler->add_thread(get_thread((*rit)->get_tid()));
405 * Process a write ModelAction
406 * @param curr The ModelAction to process
407 * @return True if the mo_graph was updated or promises were resolved
409 bool ModelChecker::process_write(ModelAction *curr)
411 bool updated_mod_order = w_modification_order(curr);
412 bool updated_promises = resolve_promises(curr);
/* Once all promises are resolved, flush queued future values to their
 * reader nodes and update the backtracking point if needed */
414 if (promises->size() == 0) {
415 for (unsigned int i = 0; i < futurevalues->size(); i++) {
416 struct PendingFutureValue pfv = (*futurevalues)[i];
417 if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) &&
418 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
419 priv->next_backtrack = pfv.act;
421 futurevalues->resize(0);
424 mo_graph->commitChanges();
425 get_thread(curr)->set_return_value(VALUE_NONE);
426 return updated_mod_order || updated_promises;
430 * Initialize the current action by performing one or more of the following
431 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
432 * in the NodeStack, manipulating backtracking sets, allocating and
433 * initializing clock vectors, and computing the promises to fulfill.
435 * @param curr The current action, as passed from the user context; may be
436 * freed/invalidated after the execution of this function
437 * @return The current action, as processed by the ModelChecker. Is only the
438 * same as the parameter @a curr if this is a newly-explored action.
440 ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr)
442 ModelAction *newcurr;
/* Second half of an RMW: merge into the earlier RMWR action */
444 if (curr->is_rmwc() || curr->is_rmw()) {
445 newcurr = process_rmw(curr);
447 compute_promises(newcurr);
451 newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
453 /* First restore type and order in case of RMW operation */
455 newcurr->copy_typeandorder(curr);
457 ASSERT(curr->get_location() == newcurr->get_location());
458 newcurr->copy_from_new(curr);
460 /* Discard duplicate ModelAction; use action from NodeStack */
463 /* If we have diverged, we need to reset the clock vector. */
465 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
469 * Perform one-time actions when pushing new ModelAction onto
472 curr->create_cv(get_parent_action(curr->get_tid()));
473 if (curr->is_write())
474 compute_promises(curr);
480 * This method checks whether a model action is enabled at the given point.
481 * At this point, it checks whether a lock operation would be successful at this point.
482 * If not, it puts the thread in a waiter list.
483 * @param curr is the ModelAction to check whether it is enabled.
484 * @return a bool that indicates whether the action is enabled.
487 bool ModelChecker::check_action_enabled(ModelAction *curr) {
488 if (curr->is_lock()) {
489 std::mutex * lock = (std::mutex *)curr->get_location();
490 struct std::mutex_state * state = lock->get_state();
491 if (state->islocked) {
492 //Stick the action in the appropriate waiting queue
/* Thread will be re-enabled by the matching ATOMIC_UNLOCK (process_mutex) */
493 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
502 * This is the heart of the model checker routine. It performs model-checking
503 * actions corresponding to a given "current action." Among other processes, it
504 * calculates reads-from relationships, updates synchronization clock vectors,
505 * forms a memory_order constraints graph, and handles replay/backtrack
506 * execution when running permutations of previously-observed executions.
508 * @param curr The current action to process
509 * @return The next Thread that must be executed. May be NULL if ModelChecker
510 * makes no choice (e.g., according to replay execution, combining RMW actions,
513 Thread * ModelChecker::check_current_action(ModelAction *curr)
517 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
/* Blocked lock: park the thread with the action pending */
519 if (!check_action_enabled(curr)) {
520 //we'll make the execution look like we chose to run this action
521 //much later...when a lock is actually available to release
522 get_current_thread()->set_pending(curr);
523 remove_thread(get_current_thread());
524 return get_next_thread(NULL);
527 ModelAction *newcurr = initialize_curr_action(curr);
529 /* Add the action to lists before any other model-checking tasks */
530 if (!second_part_of_rmw)
531 add_action_to_lists(newcurr);
533 /* Build may_read_from set for newly-created actions */
534 if (curr == newcurr && curr->is_read())
535 build_reads_from_past(curr);
538 /* Thread specific actions */
539 switch (curr->get_type()) {
540 case THREAD_CREATE: {
541 Thread *th = (Thread *)curr->get_location();
542 th->set_creation(curr);
/* THREAD_JOIN: sleep until the joined thread completes */
546 Thread *waiting, *blocking;
547 waiting = get_thread(curr);
548 blocking = (Thread *)curr->get_location();
549 if (!blocking->is_complete()) {
550 blocking->push_wait_list(curr);
551 scheduler->sleep(waiting);
553 do_complete_join(curr);
557 case THREAD_FINISH: {
/* Wake every thread that joined on this one and complete their joins */
558 Thread *th = get_thread(curr);
559 while (!th->wait_list_empty()) {
560 ModelAction *act = th->pop_wait_list();
561 Thread *wake = get_thread(act);
562 scheduler->wake(wake);
563 do_complete_join(act);
569 check_promises(NULL, curr->get_cv());
/* Fixed-point work loop: processing one item may enqueue follow-up checks */
576 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
578 while (!work_queue.empty()) {
579 WorkQueueEntry work = work_queue.front();
580 work_queue.pop_front();
583 case WORK_CHECK_CURR_ACTION: {
584 ModelAction *act = work.action;
585 bool updated = false;
586 if (act->is_read() && process_read(act, second_part_of_rmw))
589 if (act->is_write() && process_write(act))
592 if (act->is_mutex_op())
/* Any update may affect pending release sequences at this location */
596 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
599 case WORK_CHECK_RELEASE_SEQ:
600 resolve_release_sequences(work.location, &work_queue);
602 case WORK_CHECK_MO_EDGES: {
603 /** @todo Complete verification of work_queue */
604 ModelAction *act = work.action;
605 bool updated = false;
607 if (act->is_read()) {
608 if (r_modification_order(act, act->get_reads_from()))
611 if (act->is_write()) {
612 if (w_modification_order(act))
617 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
626 check_curr_backtracking(curr);
628 set_backtracking(curr);
630 return get_next_thread(curr);
634 * Complete a THREAD_JOIN operation, by synchronizing with the THREAD_FINISH
635 * operation from the Thread it is joining with. Must be called after the
636 * completion of the Thread in question.
637 * @param join The THREAD_JOIN action
639 void ModelChecker::do_complete_join(ModelAction *join)
641 Thread *blocking = (Thread *)join->get_location();
/* Last action of a completed thread is its THREAD_FINISH */
642 ModelAction *act = get_last_action(blocking->get_id());
643 join->synchronize_with(act);
/** @brief Cache @a curr as the next backtracking point when its node (or its
 *  parent) still has unexplored alternatives and curr is later than the
 *  currently-cached point. */
646 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
647 Node *currnode = curr->get_node();
648 Node *parnode = currnode->get_parent();
650 if ((!parnode->backtrack_empty() ||
651 !currnode->read_from_empty() ||
652 !currnode->future_value_empty() ||
653 !currnode->promise_empty())
654 && (!priv->next_backtrack ||
655 *curr > *priv->next_backtrack)) {
656 priv->next_backtrack = curr;
/** @return true if any outstanding promise has an expiration earlier than the
 *  current sequence number, i.e. it can no longer be fulfilled. */
660 bool ModelChecker::promises_expired() {
661 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
662 Promise *promise = (*promises)[promise_index];
663 if (promise->get_expiration()<priv->used_sequence_numbers) {
670 /** @return whether the current partial trace must be a prefix of a
672 bool ModelChecker::isfeasibleprefix() {
/* Feasible prefix = no unresolved promises and no pending lazy release-seq checks */
673 return promises->size() == 0 && *lazy_sync_size == 0;
676 /** @return whether the current partial trace is feasible. */
677 bool ModelChecker::isfeasible() {
/* Full feasibility additionally rules out RMW atomicity violations */
678 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
681 /** @return whether the current partial trace is feasible other than
682 * multiple RMW reading from the same store. */
683 bool ModelChecker::isfeasibleotherthanRMW() {
/* The DEBUG calls only report which condition failed; the final return
 * re-evaluates all four conditions */
685 if (mo_graph->checkForCycles())
686 DEBUG("Infeasible: modification order cycles\n");
688 DEBUG("Infeasible: failed promise\n");
690 DEBUG("Infeasible: too many reads\n");
691 if (promises_expired())
692 DEBUG("Infeasible: promises expired\n");
694 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !promises_expired();
697 /** Returns whether the current completed trace is feasible. */
698 bool ModelChecker::isfinalfeasible() {
699 if (DBG_ENABLED() && promises->size() != 0)
700 DEBUG("Infeasible: unrevolved promises\n");
/* A finished trace must be feasible AND have fulfilled every promise */
702 return isfeasible() && promises->size() == 0;
705 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
706 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
707 int tid = id_to_int(act->get_tid());
/* The pending RMWR is this thread's most recent action */
708 ModelAction *lastread = get_last_action(tid);
709 lastread->process_rmw(act);
710 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
/* RMW atomicity: the RMW is ordered immediately after the store it read */
711 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
712 mo_graph->commitChanges();
718 * Checks whether a thread has read from the same write for too many times
719 * without seeing the effects of a later write.
722 * 1) there must a different write that we could read from that would satisfy the modification order,
723 * 2) we must have read from the same value in excess of maxreads times, and
724 * 3) that other write must have been in the reads_from set for maxreads times.
726 * If so, we decide that the execution is no longer feasible.
728 void ModelChecker::check_recency(ModelAction *curr) {
729 if (params.maxreads != 0) {
730 if (curr->get_node()->get_read_from_size() <= 1)
733 //Must make sure that execution is currently feasible... We could
734 //accidentally clear by rolling back
738 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
739 int tid = id_to_int(curr->get_tid());
/* Thread has no action list for this location yet */
742 if ((int)thrd_lists->size() <= tid)
745 action_list_t *list = &(*thrd_lists)[tid];
747 action_list_t::reverse_iterator rit = list->rbegin();
/* Position the iterator at curr within this thread's list */
749 for (; (*rit) != curr; rit++)
751 /* go past curr now */
754 action_list_t::reverse_iterator ritcopy = rit;
755 //See if we have enough reads from the same value
757 for (; count < params.maxreads; rit++,count++) {
758 if (rit==list->rend())
760 ModelAction *act = *rit;
/* Streak broken: a read from a different write, or a forced read */
763 if (act->get_reads_from() != curr->get_reads_from())
765 if (act->get_node()->get_read_from_size() <= 1)
/* Look for some OTHER write all these reads could have read instead */
769 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
771 const ModelAction * write = curr->get_node()->get_read_from_at(i);
772 //Need a different write
773 if (write==curr->get_reads_from())
776 /* Test to see whether this is a feasible write to read from*/
777 mo_graph->startChanges();
778 r_modification_order(curr, write);
779 bool feasiblereadfrom = isfeasible();
780 mo_graph->rollbackChanges();
782 if (!feasiblereadfrom)
786 bool feasiblewrite = true;
787 //now we need to see if this write works for everyone
789 for (int loop = count; loop>0; loop--,rit++) {
790 ModelAction *act=*rit;
791 bool foundvalue = false;
792 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
/* NOTE(review): indexes with outer `i` inside a loop over `j` —
 * looks like a bug; should likely be get_read_from_at(j). Verify. */
793 if (act->get_node()->get_read_from_at(i)==write) {
799 feasiblewrite = false;
/* A viable alternative write existed but was never taken: flag infeasible */
804 too_many_reads = true;
812 * Updates the mo_graph with the constraints imposed from the current
815 * Basic idea is the following: Go through each other thread and find
816 * the lastest action that happened before our read. Two cases:
818 * (1) The action is a write => that write must either occur before
819 * the write we read from or be the write we read from.
821 * (2) The action is a read => the write that that action read from
822 * must occur before the write we read from or be the same write.
824 * @param curr The current action. Must be a read.
825 * @param rf The action that curr reads from. Must be a write.
826 * @return True if modification order edges were added; false otherwise
828 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
830 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
833 ASSERT(curr->is_read());
835 /* Iterate over all threads */
836 for (i = 0; i < thrd_lists->size(); i++) {
837 /* Iterate over actions in thread, starting from most recent */
838 action_list_t *list = &(*thrd_lists)[i];
839 action_list_t::reverse_iterator rit;
840 for (rit = list->rbegin(); rit != list->rend(); rit++) {
841 ModelAction *act = *rit;
844 * Include at most one act per-thread that "happens
845 * before" curr. Don't consider reflexively.
847 if (act->happens_before(curr) && act != curr) {
848 if (act->is_write()) {
/* Case (1): prior write is mo-before the write we read from */
850 mo_graph->addEdge(act, rf);
/* Case (2): the write that prior read saw is mo-before rf */
854 const ModelAction *prevreadfrom = act->get_reads_from();
855 if (prevreadfrom != NULL && rf != prevreadfrom) {
856 mo_graph->addEdge(prevreadfrom, rf);
868 /** This method fixes up the modification order when we resolve a
869 * promises. The basic problem is that actions that occur after the
870 * read curr could not property add items to the modification order
873 * So for each thread, we find the earliest item that happens after
874 * the read curr. This is the item we have to fix up with additional
875 * constraints. If that action is write, we add a MO edge between
876 * the Action rf and that action. If the action is a read, we add a
877 * MO edge between the Action rf, and whatever the read accessed.
879 * @param curr is the read ModelAction that we are fixing up MO edges for.
880 * @param rf is the write ModelAction that curr reads from.
884 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
886 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
888 ASSERT(curr->is_read());
890 /* Iterate over all threads */
891 for (i = 0; i < thrd_lists->size(); i++) {
892 /* Iterate over actions in thread, starting from most recent */
893 action_list_t *list = &(*thrd_lists)[i];
894 action_list_t::reverse_iterator rit;
895 ModelAction *lastact = NULL;
897 /* Find last action that happens after curr */
898 for (rit = list->rbegin(); rit != list->rend(); rit++) {
899 ModelAction *act = *rit;
900 if (curr->happens_before(act)) {
906 /* Include at most one act per-thread that "happens before" curr */
907 if (lastact != NULL) {
908 if (lastact->is_read()) {
/* rf must be mo-before whatever the later read saw */
909 const ModelAction *postreadfrom = lastact->get_reads_from();
910 if (postreadfrom != NULL&&rf != postreadfrom)
911 mo_graph->addEdge(rf, postreadfrom);
912 } else if (rf != lastact) {
/* rf must be mo-before the later write */
913 mo_graph->addEdge(rf, lastact);
921 * Updates the mo_graph with the constraints imposed from the current write.
923 * Basic idea is the following: Go through each other thread and find
924 * the lastest action that happened before our write. Two cases:
926 * (1) The action is a write => that write must occur before
929 * (2) The action is a read => the write that that action read from
930 * must occur before the current write.
932 * This method also handles two other issues:
934 * (I) Sequential Consistency: Making sure that if the current write is
935 * seq_cst, that it occurs after the previous seq_cst write.
937 * (II) Sending the write back to non-synchronizing reads.
939 * @param curr The current action. Must be a write.
940 * @return True if modification order edges were added; false otherwise
942 bool ModelChecker::w_modification_order(ModelAction *curr)
944 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
947 ASSERT(curr->is_write());
949 if (curr->is_seqcst()) {
950 /* We have to at least see the last sequentially consistent write,
951 so we are initialized. */
952 ModelAction *last_seq_cst = get_last_seq_cst(curr);
953 if (last_seq_cst != NULL) {
/* (I): seq_cst writes to the same location are mo-ordered */
954 mo_graph->addEdge(last_seq_cst, curr);
959 /* Iterate over all threads */
960 for (i = 0; i < thrd_lists->size(); i++) {
961 /* Iterate over actions in thread, starting from most recent */
962 action_list_t *list = &(*thrd_lists)[i];
963 action_list_t::reverse_iterator rit;
964 for (rit = list->rbegin(); rit != list->rend(); rit++) {
965 ModelAction *act = *rit;
968 * If RMW, we already have all relevant edges,
969 * so just skip to next thread.
970 * If normal write, we need to look at earlier
971 * actions, so continue processing list.
980 * Include at most one act per-thread that "happens
983 if (act->happens_before(curr)) {
985 * Note: if act is RMW, just add edge:
987 * The following edge should be handled elsewhere:
988 * readfrom(act) --mo--> act
991 mo_graph->addEdge(act, curr);
992 else if (act->is_read() && act->get_reads_from() != NULL)
993 mo_graph->addEdge(act->get_reads_from(), curr);
996 } else if (act->is_read() && !act->is_synchronizing(curr) &&
997 !act->same_thread(curr)) {
998 /* We have an action that:
999 (1) did not happen before us
1000 (2) is a read and we are a write
1001 (3) cannot synchronize with us
1002 (4) is in a different thread
1004 that read could potentially read from our write.
1006 if (thin_air_constraint_may_allow(curr, act)) {
1008 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
/* (II): queue this value so the earlier read may see it as a future value */
1009 struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act};
1010 futurevalues->push_back(pfv);
1020 /** Arbitrary reads from the future are not allowed. Section 29.3
1021 * part 9 places some constraints. This method checks one result of constraint
1022 * constraint. Others require compiler support. */
1024 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1025 if (!writer->is_rmw())
1028 if (!reader->is_rmw())
/* Walk the RMW chain backwards from the writer; a future read is disallowed
 * if the chain passes through the reader (or later in its thread) */
1031 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1032 if (search == reader)
1034 if (search->get_tid() == reader->get_tid() &&
1035 search->happens_before(reader))
1043 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1044 * The ModelAction under consideration is expected to be taking part in
1045 * release/acquire synchronization as an object of the "reads from" relation.
1046 * Note that this can only provide release sequence support for RMW chains
1047 * which do not read from the future, as those actions cannot be traced until
1048 * their "promise" is fulfilled. Similarly, we may not even establish the
1049 * presence of a release sequence with certainty, as some modification order
1050 * constraints may be decided further in the future. Thus, this function
1051 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1052 * and a boolean representing certainty.
1054 * @todo Finish lazy updating, when promises are fulfilled in the future
1055 * @param rf The action that might be part of a release sequence. Must be a
1057 * @param release_heads A pass-by-reference style return parameter. After
1058 * execution of this function, release_heads will contain the heads of all the
1059 * relevant release sequences, if any exists
1060 * @return true, if the ModelChecker is certain that release_heads is complete;
1063 bool ModelChecker::release_seq_head(const ModelAction *rf, rel_heads_list_t *release_heads) const
1066 /* read from future: need to settle this later */
1067 return false; /* incomplete */
1070 ASSERT(rf->is_write());
1072 if (rf->is_release())
1073 release_heads->push_back(rf);
1075 /* We need a RMW action that is both an acquire and release to stop */
1076 /** @todo Need to be smarter here... In the linux lock
1077 * example, this will run to the beginning of the program for
1079 if (rf->is_acquire() && rf->is_release())
1080 return true; /* complete */
/* Recurse down the RMW chain toward the sequence head */
1081 return release_seq_head(rf->get_reads_from(), release_heads);
1083 if (rf->is_release())
1084 return true; /* complete */
1086 /* else relaxed write; check modification order for contiguous subsequence
1087 * -> rf must be same thread as release */
1088 int tid = id_to_int(rf->get_tid());
1089 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
1090 action_list_t *list = &(*thrd_lists)[tid];
1091 action_list_t::const_reverse_iterator rit;
1093 /* Find rf in the thread list */
1094 rit = std::find(list->rbegin(), list->rend(), rf);
1095 ASSERT(rit != list->rend());
1097 /* Find the last write/release */
1098 for (; rit != list->rend(); rit++)
1099 if ((*rit)->is_release())
1101 if (rit == list->rend()) {
1102 /* No write-release in this thread */
1103 return true; /* complete */
1105 ModelAction *release = *rit;
1107 ASSERT(rf->same_thread(release));
/* Check each OTHER thread for writes that might break the contiguous
 * release -> ... -> rf modification-order subsequence */
1109 bool certain = true;
1110 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1111 if (id_to_int(rf->get_tid()) == (int)i)
1113 list = &(*thrd_lists)[i];
1115 /* Can we ensure no future writes from this thread may break
1116 * the release seq? */
1117 bool future_ordered = false;
1119 ModelAction *last = get_last_action(int_to_id(i));
1120 if (last && rf->happens_before(last))
1121 future_ordered = true;
1123 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1124 const ModelAction *act = *rit;
1125 /* Reach synchronization -> this thread is complete */
1126 if (act->happens_before(release))
1128 if (rf->happens_before(act)) {
1129 future_ordered = true;
1133 /* Only writes can break release sequences */
1134 if (!act->is_write())
1137 /* Check modification order */
1138 if (mo_graph->checkReachable(rf, act)) {
1139 /* rf --mo--> act */
1140 future_ordered = true;
1143 if (mo_graph->checkReachable(act, release))
1144 /* act --mo--> release */
1146 if (mo_graph->checkReachable(release, act) &&
1147 mo_graph->checkReachable(act, rf)) {
1148 /* release --mo-> act --mo--> rf */
/* A foreign write sits inside the sequence: it is broken */
1149 return true; /* complete */
1153 if (!future_ordered)
1154 return false; /* This thread is uncertain */
1158 release_heads->push_back(release);
1163 * A public interface for getting the release sequence head(s) with which a
1164 * given ModelAction must synchronize. This function only returns a non-empty
1165 * result when it can locate a release sequence head with certainty. Otherwise,
1166 * it may mark the internal state of the ModelChecker so that it will handle
1167 * the release sequence at a later time, causing @a act to update its
1168 * synchronization at some later point in execution.
1169 * @param act The 'acquire' action that may read from a release sequence
1170 * @param release_heads A pass-by-reference return parameter. Will be filled
1171 * with the head(s) of the release sequence(s), if they exist with certainty.
1172 * @see ModelChecker::release_seq_head
// Resolve the release-sequence head(s) for the write that 'act' reads from,
// delegating the actual search to release_seq_head().  If the heads cannot
// be established with certainty ('complete' false; the guarding branch is
// elided from this listing), 'act' is stashed on the per-location
// lazy_sync_with_release list and lazy_sync_size is incremented so that
// resolve_release_sequences() can retry once more ordering info exists.
// NOTE(review): elided listing — embedded numbers are original file line
// numbers; gaps (1175, 1177, 1179, ...) hide braces and the declaration
// of 'complete'.
1174 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1176 	const ModelAction *rf = act->get_reads_from();
1178 	complete = release_seq_head(rf, release_heads);
1180 		/* add act to 'lazy checking' list */
1181 		action_list_t *list;
1182 		list = lazy_sync_with_release->get_safe_ptr(act->get_location());
1183 		list->push_back(act);
1184 		(*lazy_sync_size)++;
1189 * Attempt to resolve all stashed operations that might synchronize with a
1190 * release sequence for a given location. This implements the "lazy" portion of
1191 * determining whether or not a release sequence was contiguous, since not all
1192 * modification order information is present at the time an action occurs.
1194 * @param location The location/object that should be checked for release
1195 * sequence resolutions
1196 * @param work_queue The work queue to which to add work items as they are
1198 * @return True if any updates occurred (new synchronization, new mo_graph
// Walk the lazily-stashed acquire actions for 'location' and retry
// release_seq_head() for each.  Newly-certain heads are synchronized with,
// the action is queued for mo_graph edge re-checking, and the
// synchronization is propagated to later actions in the trace.  Completed
// entries are erased from the lazy list (decrementing lazy_sync_size).
// NOTE(review): elided listing — the null-check on 'list', the 'updated'
// bookkeeping on synchronization, the iterator advance for incomplete
// entries, and the final return are in the hidden lines.
1201 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1203 	action_list_t *list;
1204 	list = lazy_sync_with_release->getptr(location);
1208 	bool updated = false;
1209 	action_list_t::iterator it = list->begin();
1210 	while (it != list->end()) {
1211 		ModelAction *act = *it;
1212 		const ModelAction *rf = act->get_reads_from();
1213 		rel_heads_list_t release_heads;
1215 		complete = release_seq_head(rf, &release_heads);
1216 		for (unsigned int i = 0; i < release_heads.size(); i++) {
1217 			if (!act->has_synchronized_with(release_heads[i])) {
1219 				act->synchronize_with(release_heads[i]);
1224 			/* Re-check act for mo_graph edges */
1225 			work_queue->push_back(MOEdgeWorkEntry(act));
1227 			/* propagate synchronization to later actions */
// NOTE(review): this inner 'it' shadows the outer list iterator above —
// intentional in the original, but error-prone; worth renaming upstream.
1228 			action_list_t::reverse_iterator it = action_trace->rbegin();
// Scan backwards from the end of the trace until we reach 'act' itself,
// synchronizing every later action that happens-after 'act'.
1229 			while ((*it) != act) {
1230 				ModelAction *propagate = *it;
1231 				if (act->happens_before(propagate)) {
1232 					propagate->synchronize_with(act);
1233 					/* Re-check 'propagate' for mo_graph edges */
1234 					work_queue->push_back(MOEdgeWorkEntry(propagate));
1239 			it = list->erase(it);
1240 			(*lazy_sync_size)--;
1245 	// If we resolved promises or data races, see if we have realized a data race.
1246 	if (checkDataRaces()) {
1254 * Performs various bookkeeping operations for the current ModelAction. For
1255 * instance, adds action to the per-object, per-thread action vector and to the
1256 * action trace list of all thread actions.
1258 * @param act is the ModelAction to add.
// Bookkeeping for a newly-processed action: append it to the global trace,
// the per-object list, the per-object/per-thread list, and record it as the
// thread's most recent action.
// NOTE(review): the two resize bounds differ — obj_thrd_map vectors grow to
// priv->next_thread_id while thrd_last_action grows to get_num_threads();
// presumably these are equal, but confirm.
1260 void ModelChecker::add_action_to_lists(ModelAction *act)
1262 	int tid = id_to_int(act->get_tid());
1263 	action_trace->push_back(act);
1265 	obj_map->get_safe_ptr(act->get_location())->push_back(act);
1267 	std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
// Grow the per-thread vector on demand so index 'tid' is valid.
1268 	if (tid >= (int)vec->size())
1269 		vec->resize(priv->next_thread_id);
1270 	(*vec)[tid].push_back(act);
1272 	if ((int)thrd_last_action->size() <= tid)
1273 		thrd_last_action->resize(get_num_threads());
1274 	(*thrd_last_action)[tid] = act;
1278 * @brief Get the last action performed by a particular Thread
1279 * @param tid The thread ID of the Thread in question
1280 * @return The last action in the thread
// Look up the most recent action recorded for thread 'tid' in the
// thrd_last_action cache.  The out-of-range fallback (presumably
// returning NULL) is in the elided lines.
// NOTE(review): the index re-derives id_to_int(tid) instead of reusing
// 'threadid' — equivalent, but redundant.
1282 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1284 	int threadid = id_to_int(tid);
1285 	if (threadid < (int)thrd_last_action->size())
1286 		return (*thrd_last_action)[id_to_int(tid)];
1292 * Gets the last memory_order_seq_cst write (in the total global sequence)
1293 * performed on a particular object (i.e., memory location), not including the
1295 * @param curr The current ModelAction; also denotes the object location to
1297 * @return The last seq_cst write
// Scan the per-object action list backwards (most recent first) for the
// last seq_cst write at curr's location, excluding 'curr' itself.  The
// matching return and the not-found (presumably NULL) return are elided.
1299 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1301 	void *location = curr->get_location();
1302 	action_list_t *list = obj_map->get_safe_ptr(location);
1303 	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1304 	action_list_t::reverse_iterator rit;
1305 	for (rit = list->rbegin(); rit != list->rend(); rit++)
1306 		if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1312 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1313 * location). This function identifies the mutex according to the current
1314 * action, which is presumed to operate on the same mutex.
1315 * @param curr The current ModelAction; also denotes the object location to
1317 * @return The last unlock operation
// Scan the per-object action list backwards for the most recent unlock on
// curr's location (the mutex object).  The matching return and the
// not-found fallback are in the elided lines.
1319 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1321 	void *location = curr->get_location();
1322 	action_list_t *list = obj_map->get_safe_ptr(location);
1323 	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1324 	action_list_t::reverse_iterator rit;
1325 	for (rit = list->rbegin(); rit != list->rend(); rit++)
1326 		if ((*rit)->is_unlock())
// Return the thread's last action, falling back to its THREAD_CREATE
// action when it has none yet.  The guard around the fallback (presumably
// 'if (!parent)') and the final 'return parent;' are elided here.
1331 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1333 	ModelAction *parent = get_last_action(tid);
1335 		parent = get_thread(tid)->get_creation();
1340 * Returns the clock vector for a given thread.
1341 * @param tid The thread whose clock vector we want
1342 * @return Desired clock vector
// The clock vector for thread 'tid' is the one attached to its parent
// action (last action, or the creating THREAD_CREATE — see
// get_parent_action()).
1344 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1346 	return get_parent_action(tid)->get_cv();
1350 * Resolve a set of Promises with a current write. The set is provided in the
1351 * Node corresponding to @a write.
1352 * @param write The ModelAction that is fulfilling Promises
1353 * @return True if promises were resolved; false otherwise
// Resolve every Promise that curr's Node flagged for this write: point the
// promised read at 'write', add an RMW edge if needed, repair modification
// order on both sides of the read, and erase the fulfilled Promise.
// NOTE(review): two indices — 'i' tracks the Node's promise bitmap and
// advances every iteration; 'promise_index' advances only in the elided
// else-branch (erase keeps it in place).  The 'resolved = true' update and
// final return are also elided.
1355 bool ModelChecker::resolve_promises(ModelAction *write)
1357 	bool resolved = false;
1359 	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1360 		Promise *promise = (*promises)[promise_index];
1361 		if (write->get_node()->get_promise(i)) {
1362 			ModelAction *read = promise->get_action();
1363 			read->read_from(write);
1364 			if (read->is_rmw()) {
1365 				mo_graph->addRMWEdge(write, read);
1367 			//First fix up the modification order for actions that happened
1369 			r_modification_order(read, write);
1370 			//Next fix up the modification order for actions that happened
1372 			post_r_modification_order(read, write);
1373 			promises->erase(promises->begin() + promise_index);
1382 * Compute the set of promises that could potentially be satisfied by this
1383 * action. Note that the set computation actually appears in the Node, not in
1385 * @param curr The ModelAction that may satisfy promises
// Mark in curr's Node every outstanding Promise that this action could
// satisfy: the promised read must not happen-before curr, must not be
// synchronizing with or in the same thread as curr, and must expect
// curr's value.  One condition line (original line 1393) is elided.
1387 void ModelChecker::compute_promises(ModelAction *curr)
1389 	for (unsigned int i = 0; i < promises->size(); i++) {
1390 		Promise *promise = (*promises)[i];
1391 		const ModelAction *act = promise->get_action();
1392 		if (!act->happens_before(curr) &&
1394 				!act->is_synchronizing(curr) &&
1395 				!act->same_thread(curr) &&
1396 				promise->get_value() == curr->get_value()) {
1397 			curr->get_node()->set_promise(i);
1402 /** Checks promises in response to change in ClockVector Threads. */
// Called when a thread's clock vector grows (old_cv merged into merge_cv).
// A thread that newly synchronizes past a promised read can no longer feed
// a value back to it; once every thread has done so (increment_threads()
// reaches the thread count), the promise can never be fulfilled and the
// execution is marked infeasible via failed_promise.
1403 void ModelChecker::check_promises(ClockVector *old_cv, ClockVector *merge_cv)
1405 	for (unsigned int i = 0; i < promises->size(); i++) {
1406 		Promise *promise = (*promises)[i];
1407 		const ModelAction *act = promise->get_action();
// Trigger only on the transition: not synchronized before, synchronized now.
1408 		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1409 				merge_cv->synchronized_since(act)) {
1410 			//This thread is no longer able to send values back to satisfy the promise
1411 			int num_synchronized_threads = promise->increment_threads();
1412 			if (num_synchronized_threads == get_num_threads()) {
1413 				//Promise has failed
1414 				failed_promise = true;
1422 * Build up an initial set of all past writes that this 'read' action may read
1423 * from. This set is determined by the clock vector's "happens before"
1425 * @param curr is the current ModelAction that we are exploring; it must be a
// Populate curr's Node with the set of past writes this read may read
// from, per happens-before and seq_cst constraints.  A seq_cst read must
// see at least the last seq_cst write; a write hidden behind that last
// seq_cst write is excluded.  At most one happens-before write per thread
// is included (the per-thread loop presumably breaks there — elided).
// NOTE(review): elided listing — the 'continue'/'break' statements, the
// 'initialized = true' assignments, the declaration of 'i', and several
// closing braces are in the hidden lines.
1428 void ModelChecker::build_reads_from_past(ModelAction *curr)
1430 	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1432 	ASSERT(curr->is_read());
1434 	ModelAction *last_seq_cst = NULL;
1436 	/* Track whether this object has been initialized */
1437 	bool initialized = false;
1439 	if (curr->is_seqcst()) {
1440 		last_seq_cst = get_last_seq_cst(curr);
1441 		/* We have to at least see the last sequentially consistent write,
1442 		   so we are initialized. */
1443 		if (last_seq_cst != NULL)
1447 	/* Iterate over all threads */
1448 	for (i = 0; i < thrd_lists->size(); i++) {
1449 		/* Iterate over actions in thread, starting from most recent */
1450 		action_list_t *list = &(*thrd_lists)[i];
1451 		action_list_t::reverse_iterator rit;
1452 		for (rit = list->rbegin(); rit != list->rend(); rit++) {
1453 			ModelAction *act = *rit;
1455 			/* Only consider 'write' actions */
1456 			if (!act->is_write() || act == curr)
1459 			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
1460 			if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
1461 				DEBUG("Adding action to may_read_from:\n");
1462 				if (DBG_ENABLED()) {
1466 				curr->get_node()->add_read_from(act);
1469 			/* Include at most one act per-thread that "happens before" curr */
1470 			if (act->happens_before(curr)) {
// An empty may-read-from set means the read could observe an
// uninitialized atomic — reported below and asserted fatal.
1478 	/** @todo Need a more informative way of reporting errors. */
1479 	printf("ERROR: may read from uninitialized atomic\n");
1482 	if (DBG_ENABLED() || !initialized) {
1483 		printf("Reached read action:\n");
1485 		printf("Printing may_read_from\n");
1486 		curr->get_node()->print_may_read_from();
1487 		printf("End printing may_read_from\n");
1490 	ASSERT(initialized);
// File-local helper: print every action in 'list' between separator rules.
// The per-action print call inside the loop is elided from this listing
// (presumably (*it)->print() or similar — confirm upstream).
1493 static void print_list(action_list_t *list)
1495 	action_list_t::iterator it;
1497 	printf("---------------------------------------------------------------------\n");
1500 	for (it = list->begin(); it != list->end(); it++) {
1503 	printf("---------------------------------------------------------------------\n");
// Print end-of-execution statistics: execution counts, node count, an
// optional modification-order graph dump (SUPPORT_MOD_ORDER_DUMP), an
// infeasibility banner, and the full action trace.
1506 void ModelChecker::print_summary()
1509 	printf("Number of executions: %d\n", num_executions);
1510 	printf("Number of feasible executions: %d\n", num_feasible_executions);
1511 	printf("Total nodes created: %d\n", node_stack->get_total_nodes());
1513 #if SUPPORT_MOD_ORDER_DUMP
// Dump mo_graph to a per-execution file, e.g. "exec0001".
1515 	char buffername[100];
1516 	sprintf(buffername, "exec%04u", num_executions);
1517 	mo_graph->dumpGraphToFile(buffername);
1520 	if (!isfinalfeasible())
1521 		printf("INFEASIBLE EXECUTION!\n");
1522 	print_list(action_trace);
1527 * Add a Thread to the system for the first time. Should only be called once
1529 * @param t The Thread to add
// Register a brand-new Thread: record it in the id->Thread map and hand it
// to the scheduler.  Per the header comment, called once per thread.
1531 void ModelChecker::add_thread(Thread *t)
1533 	thread_map->put(id_to_int(t->get_id()), t);
1534 	scheduler->add_thread(t);
// Remove a Thread from scheduling.  Note it stays in thread_map, whose
// entries are deleted in the ModelChecker destructor.
1537 void ModelChecker::remove_thread(Thread *t)
1539 	scheduler->remove_thread(t);
1543 * Switch from a user-context to the "master thread" context (a.k.a. system
1544 * context). This switch is made with the intention of exploring a particular
1545 * model-checking action (described by a ModelAction object). Must be called
1546 * from a user-thread context.
1547 * @param act The current action that will be explored. Must not be NULL.
1548 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
// User-thread side of the context switch: publish 'act' as the current
// action, mark this thread READY (so take_step() will process it), and
// swap into the system (master) context.  Returns the swap status.
1550 int ModelChecker::switch_to_master(ModelAction *act)
1553 	Thread *old = thread_current();
1554 	set_current_action(act);
1555 	old->set_state(THREAD_READY);
1556 	return Thread::swap(old, &system_context);
1560 * Takes the next step in the execution, if possible.
1561 * @return Returns true (success) if a step was taken and false otherwise.
// Master-context step: if the current thread is READY, process its pending
// ModelAction (check_current_action may suggest the next thread), retire
// the thread if it blocked or finished, then ask the scheduler for the
// next thread, resume any action it had pending, and swap into it.
// NOTE(review): elided listing — the early 'return false' paths for the
// infeasible and next==NULL cases (original lines ~1583-1592) and several
// braces are hidden.
1563 bool ModelChecker::take_step() {
1567 	Thread * curr = thread_current();
1569 	if (curr->get_state() == THREAD_READY) {
1570 		ASSERT(priv->current_action);
1572 		priv->nextThread = check_current_action(priv->current_action);
1573 		priv->current_action = NULL;
// A blocked/completed thread must not be rescheduled.
1574 		if (curr->is_blocked() || curr->is_complete())
1575 			scheduler->remove_thread(curr);
1580 	Thread * next = scheduler->next_thread(priv->nextThread);
1582 	/* Infeasible -> don't take any more steps */
1587 	next->set_state(THREAD_RUNNING);
1588 	DEBUG("(%d, %d)\n", curr ? curr->get_id() : -1, next ? next->get_id() : -1);
1590 	/* next == NULL -> don't take any more steps */
// If 'next' was parked with an unfinished action, re-arm it before swapping.
1594 	if ( next->get_pending() != NULL ) {
1595 		//restart a pending action
1596 		set_current_action(next->get_pending());
1597 		next->set_pending(NULL);
1598 		next->set_state(THREAD_READY);
1602 	/* Return false only if swap fails with an error */
1603 	return (Thread::swap(&system_context, next) == 0);
1606 /** Runs the current execution until there are no more steps to take. */
1607 void ModelChecker::finish_execution() {
1610 while (take_step());