0c201fd381aa239eeaa060d6e46f22aae9023579
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
/* Thread id handed to the very first (model-checker) thread */
#define INITIAL_THREAD_ID       0

/** @brief The global ModelChecker instance */
ModelChecker *model;
22
/** @brief A single recorded bug report message (snapshot-allocated) */
struct bug_message {
	/* Format str into a snapshot-allocated buffer. Size is
	 * strlen(fmt) + strlen(str): the "%s" (2 chars) in fmt is replaced
	 * by str, so the formatted result plus NUL fits with a byte spare. */
	bug_message(const char *str) {
		const char *fmt = "  [BUG] %s\n";
		msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
		sprintf(msg, fmt, str);
	}
	~bug_message() { if (msg) snapshot_free(msg); }

	/* The formatted message, or NULL */
	char *msg;
	void print() { model_print("%s", msg); }

	SNAPSHOTALLOC
};
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		stats(),
		failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* bug_message objects are owned by this struct */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief Next available thread id; doubles as the thread count */
	unsigned int next_thread_id;
	/** @brief Highest sequence number handed out so far */
	modelclock_t used_sequence_numbers;
	/** @brief Most recent backtracking point cached for this execution */
	ModelAction *next_backtrack;
	/** @brief Bug reports accumulated during this execution */
	std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
	struct execution_stats stats;
	bool failed_promise;
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief A trace assertion was triggered; halt this execution */
	bool asserted;

	SNAPSHOTALLOC
};
75
/** @brief Constructor */
ModelChecker::ModelChecker(struct model_params params) :
	/* Initialize default scheduler */
	params(params),
	scheduler(new Scheduler()),
	diverge(NULL),
	earliest_diverge(NULL),
	action_trace(new action_list_t()),
	thread_map(new HashTable<int, Thread *, int>()),
	obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
	promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
	futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
	pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
	/* Pre-size for one entry: the model thread created below */
	thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
	thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
	node_stack(new NodeStack()),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions; it
	 * receives INITIAL_THREAD_ID from get_next_id() */
	model_thread = new Thread(get_next_id());
	thread_map->put(id_to_int(model_thread->get_id()), model_thread);
}
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
/**
 * Restores user program to initial state and resets all model-checker data
 * structures.
 */
void ModelChecker::reset_to_initial_state()
{
	DEBUG("+++ Resetting to initial state +++\n");
	node_stack->reset_execution();

	/* Print all model-checker output before rollback */
	fflush(model_out);

	/**
	 * FIXME: if we utilize partial rollback, we will need to free only
	 * those pending actions which were NOT pending before the rollback
	 * point
	 */
	for (unsigned int i = 0; i < get_num_threads(); i++)
		delete get_thread(int_to_id(i))->get_pending();

	/* Roll user-program memory back to snapshot 0 (initial state) */
	snapshot_backtrack_before(0);
}
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
/** @return the number of user threads created during this execution */
unsigned int ModelChecker::get_num_threads() const
{
	/* next_thread_id counts all ids handed out so far */
	return priv->next_thread_id;
}
184
/**
 * Must be called from user-thread context (e.g., through the global
 * thread_current() interface)
 *
 * @return The currently executing Thread.
 */
Thread * ModelChecker::get_current_thread() const
{
	/* Delegates entirely to the scheduler */
	return scheduler->get_current_thread();
}
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
/** @return the Node at the head of the NodeStack (the current node) */
Node * ModelChecker::get_curr_node() const
{
	return node_stack->get_head();
}
206
/**
 * @brief Choose the next thread to execute.
 *
 * This function chooses the next thread that should execute. It can force the
 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
 * The model-checker may have no preference regarding the next thread (i.e.,
 * when exploring a new execution ordering), in which case we defer to the
 * scheduler.
 *
 * @param curr Optional: The current ModelAction. Only used if non-NULL and it
 * might guide the choice of next thread (i.e., THREAD_CREATE should be
 * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
 * @return The next chosen thread to run, if any exist. Or else if no threads
 * remain to be executed, return NULL.
 */
Thread * ModelChecker::get_next_thread(ModelAction *curr)
{
	thread_id_t tid;

	if (curr != NULL) {
		/* Do not split atomic actions. */
		if (curr->is_rmwr())
			return get_thread(curr);
		else if (curr->get_type() == THREAD_CREATE)
			return curr->get_thread_operand();
	}

	/*
	 * Have we completed exploring the preselected path? Then let the
	 * scheduler decide
	 */
	if (diverge == NULL)
		return scheduler->select_next_thread();

	/* Else, we are trying to replay an execution */
	ModelAction *next = node_stack->get_next()->get_action();

	if (next == diverge) {
		/* Track the earliest divergence since the last feasible
		 * execution, for diagnostics */
		if (earliest_diverge == NULL || *diverge < *earliest_diverge)
			earliest_diverge = diverge;

		Node *nextnode = next->get_node();
		Node *prevnode = nextnode->get_parent();
		scheduler->update_sleep_set(prevnode);

		/* Reached divergence point; try the node's alternatives in a
		 * fixed priority order. Each increment_*() advances to the
		 * next unexplored choice of that kind, if any. */
		if (nextnode->increment_misc()) {
			/* The next node will try to satisfy different misc_index values. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_promise()) {
			/* The next node will try to satisfy a different set of promises. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_read_from_past()) {
			/* The next node will read from a different value. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_future_value()) {
			/* The next node will try to read from a different future value. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_relseq_break()) {
			/* The next node will try to resolve a release sequence differently */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else {
			ASSERT(prevnode);
			/* Make a different thread execute for next step */
			scheduler->add_sleep(get_thread(next->get_tid()));
			tid = prevnode->get_next_backtrack();
			/* Make sure the backtracked thread isn't sleeping. */
			node_stack->pop_restofstack(1);
			if (diverge == earliest_diverge) {
				earliest_diverge = prevnode->get_action();
			}
		}
		/* Start the round robin scheduler from this thread id */
		scheduler->set_scheduler_thread(tid);
		/* The correct sleep set is in the parent node. */
		execute_sleep_set();

		DEBUG("*** Divergence point ***\n");

		/* Replay finished; subsequent calls defer to the scheduler */
		diverge = NULL;
	} else {
		/* Still replaying: run the same thread as last execution */
		tid = next->get_tid();
	}
	DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
	ASSERT(tid != THREAD_ID_T_NONE);
	return thread_map->get(id_to_int(tid));
}
300
301 /**
302  * We need to know what the next actions of all threads in the sleep
303  * set will be.  This method computes them and stores the actions at
304  * the corresponding thread object's pending action.
305  */
306
307 void ModelChecker::execute_sleep_set()
308 {
309         for (unsigned int i = 0; i < get_num_threads(); i++) {
310                 thread_id_t tid = int_to_id(i);
311                 Thread *thr = get_thread(tid);
312                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
313                         thr->get_pending()->set_sleep_flag();
314                 }
315         }
316 }
317
/**
 * @brief Should the current action wake up a given thread?
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
	/* The sleeping thread's next (pending) action */
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	if (curr->is_rmwr())
		return false;
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
		return true;
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
		return true;
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		/* Only wake if the fence-release comes after the sleeper's
		 * last action */
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
			return true;
	}
	return false;
}
345
346 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
347 {
348         for (unsigned int i = 0; i < get_num_threads(); i++) {
349                 Thread *thr = get_thread(int_to_id(i));
350                 if (scheduler->is_sleep_set(thr)) {
351                         if (should_wake_up(curr, thr))
352                                 /* Remove this thread from sleep set */
353                                 scheduler->remove_sleep(thr);
354                 }
355         }
356 }
357
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made
 *
 * Sets the snapshotted bad_synchronization flag; the flag is only ever
 * raised here, never cleared within an execution. */
void ModelChecker::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
364
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelChecker::has_asserted() const
{
	/* Set via set_assert() */
	return priv->asserted;
}
375
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelChecker::set_assert()
{
	/* Checked by has_asserted() */
	priv->asserted = true;
}
385
386 /**
387  * Check if we are in a deadlock. Should only be called at the end of an
388  * execution, although it should not give false positives in the middle of an
389  * execution (there should be some ENABLED thread).
390  *
391  * @return True if program is in a deadlock; false otherwise
392  */
393 bool ModelChecker::is_deadlocked() const
394 {
395         bool blocking_threads = false;
396         for (unsigned int i = 0; i < get_num_threads(); i++) {
397                 thread_id_t tid = int_to_id(i);
398                 if (is_enabled(tid))
399                         return false;
400                 Thread *t = get_thread(tid);
401                 if (!t->is_model_thread() && t->get_pending())
402                         blocking_threads = true;
403         }
404         return blocking_threads;
405 }
406
407 /**
408  * Check if this is a complete execution. That is, have all thread completed
409  * execution (rather than exiting because sleep sets have forced a redundant
410  * execution).
411  *
412  * @return True if the execution is complete.
413  */
414 bool ModelChecker::is_complete_execution() const
415 {
416         for (unsigned int i = 0; i < get_num_threads(); i++)
417                 if (is_enabled(int_to_id(i)))
418                         return false;
419         return true;
420 }
421
422 /**
423  * @brief Assert a bug in the executing program.
424  *
425  * Use this function to assert any sort of bug in the user program. If the
426  * current trace is feasible (actually, a prefix of some feasible execution),
427  * then this execution will be aborted, printing the appropriate message. If
428  * the current trace is not yet feasible, the error message will be stashed and
429  * printed if the execution ever becomes feasible.
430  *
431  * @param msg Descriptive message for the bug (do not include newline char)
432  * @return True if bug is immediately-feasible
433  */
434 bool ModelChecker::assert_bug(const char *msg)
435 {
436         priv->bugs.push_back(new bug_message(msg));
437
438         if (isfeasibleprefix()) {
439                 set_assert();
440                 return true;
441         }
442         return false;
443 }
444
445 /**
446  * @brief Assert a bug in the executing program, asserted by a user thread
447  * @see ModelChecker::assert_bug
448  * @param msg Descriptive message for the bug (do not include newline char)
449  */
450 void ModelChecker::assert_user_bug(const char *msg)
451 {
452         /* If feasible bug, bail out now */
453         if (assert_bug(msg))
454                 switch_to_master(NULL);
455 }
456
457 /** @return True, if any bugs have been reported for this execution */
458 bool ModelChecker::have_bug_reports() const
459 {
460         return priv->bugs.size() != 0;
461 }
462
463 /** @brief Print bug report listing for this execution (if any bugs exist) */
464 void ModelChecker::print_bugs() const
465 {
466         if (have_bug_reports()) {
467                 model_print("Bug report: %zu bug%s detected\n",
468                                 priv->bugs.size(),
469                                 priv->bugs.size() > 1 ? "s" : "");
470                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
471                         priv->bugs[i]->print();
472         }
473 }
474
475 /**
476  * @brief Record end-of-execution stats
477  *
478  * Must be run when exiting an execution. Records various stats.
479  * @see struct execution_stats
480  */
481 void ModelChecker::record_stats()
482 {
483         stats.num_total++;
484         if (!isfeasibleprefix())
485                 stats.num_infeasible++;
486         else if (have_bug_reports())
487                 stats.num_buggy_executions++;
488         else if (is_complete_execution())
489                 stats.num_complete++;
490         else
491                 stats.num_redundant++;
492 }
493
/** @brief Print execution stats (totals recorded by record_stats()) */
void ModelChecker::print_stats() const
{
	model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
	model_print("Number of redundant executions: %d\n", stats.num_redundant);
	model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
	model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
	model_print("Total executions: %d\n", stats.num_total);
	model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
}
504
/**
 * @brief End-of-execution print
 * @param printbugs Should any existing bugs be printed?
 */
void ModelChecker::print_execution(bool printbugs) const
{
	print_program_output();

	/* Verbose/debug-only diagnostics */
	if (DBG_ENABLED() || params.verbose) {
		model_print("Earliest divergence point since last feasible execution:\n");
		if (earliest_diverge)
			earliest_diverge->print();
		else
			model_print("(Not set)\n");

		model_print("\n");
		print_stats();
	}

	/* Don't print invalid bugs */
	if (printbugs)
		print_bugs();

	model_print("\n");
	print_summary();
}
531
/**
 * Queries the model-checker for more executions to explore and, if one
 * exists, resets the model-checker state to execute a new execution.
 *
 * @return If there are more executions to explore, return true. Otherwise,
 * return false.
 */
bool ModelChecker::next_execution()
{
	DBG();
	/* Is this execution a feasible execution that's worth bug-checking? */
	bool complete = isfeasibleprefix() && (is_complete_execution() ||
			have_bug_reports());

	/* End-of-execution bug checks */
	if (complete) {
		if (is_deadlocked())
			assert_bug("Deadlock detected");

		checkDataRaces();
	}

	record_stats();

	/* Output: print full execution info when debugging/verbose or when a
	 * feasible execution found bugs; otherwise drop the program output */
	if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
		print_execution(complete);
	else
		clear_program_output();

	/* A completed feasible execution resets the divergence tracking */
	if (complete)
		earliest_diverge = NULL;

	/* No backtracking point left: exploration is finished */
	if ((diverge = get_next_backtrack()) == NULL)
		return false;

	if (DBG_ENABLED()) {
		model_print("Next execution will diverge at:\n");
		diverge->print();
	}

	/* Roll everything back for the next execution */
	reset_to_initial_state();
	return true;
}
576
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: walk the trace backwards until we reach
	 * last_release, so the main scan below only covers actions that
	 * precede it */
	action_list_t *list = action_trace;
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread latest acquire-fence and latest plain load of act's
	 * variable, indexed by thread id */
	std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
	std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Never backtrack against our own thread */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire-fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* A conflict needs both a prior load and an acquire-fence in the same
	 * thread; pick the latest such fence across all threads */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
649
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primarily includes pairs
 * of synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	/* case ATOMIC_FENCE: fences don't directly cause backtracking */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return the later of
		 * the two candidates */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest; conflicts are
		 * other lock operations on the same mutex */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest; an unlock
		 * conflicts with another thread's failed trylock */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest; a wait conflicts
		 * with another thread's failed trylock or notify */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest; a notify
		 * conflicts with another thread's wait on the same condvar */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
744
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelChecker::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	/* No conflicting action means no backtracking needed */
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* If act's thread was enabled at the conflict point, only its own
	 * tid needs consideration; otherwise scan every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows: skip tid if some other enabled
		 * thread currently has priority */
		if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}
		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
811
812 /**
813  * @brief Cache the a backtracking point as the "most recent", if eligible
814  *
815  * Note that this does not prepare the NodeStack for this backtracking
816  * operation, it only caches the action on a per-execution basis
817  *
818  * @param act The operation at which we should explore a different next action
819  * (i.e., backtracking point)
820  * @return True, if this action is now the most recent backtracking point;
821  * false otherwise
822  */
823 bool ModelChecker::set_latest_backtrack(ModelAction *act)
824 {
825         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
826                 priv->next_backtrack = act;
827                 return true;
828         }
829         return false;
830 }
831
832 /**
833  * Returns last backtracking point. The model checker will explore a different
834  * path for this point in the next execution.
835  * @return The ModelAction at which the next execution should diverge.
836  */
837 ModelAction * ModelChecker::get_next_backtrack()
838 {
839         ModelAction *next = priv->next_backtrack;
840         priv->next_backtrack = NULL;
841         return next;
842 }
843
844 /**
845  * Processes a read model action.
846  * @param curr is the read model action to process.
847  * @return True if processing this read updates the mo_graph.
848  */
849 bool ModelChecker::process_read(ModelAction *curr)
850 {
851         uint64_t value = VALUE_NONE;
852         bool updated = false;
853         while (true) {
854                 const ModelAction *rf = curr->get_node()->get_read_from_past();
855                 if (rf != NULL) {
856                         mo_graph->startChanges();
857
858                         value = rf->get_value();
859
860                         check_recency(curr, rf);
861                         bool r_status = r_modification_order(curr, rf);
862
863                         if (is_infeasible() && (curr->get_node()->increment_read_from_past() || curr->get_node()->increment_future_value())) {
864                                 mo_graph->rollbackChanges();
865                                 priv->too_many_reads = false;
866                                 continue;
867                         }
868
869                         read_from(curr, rf);
870                         mo_graph->commitChanges();
871                         mo_check_promises(curr, true);
872
873                         updated |= r_status;
874                 } else {
875                         /* Read from future value */
876                         struct future_value fv = curr->get_node()->get_future_value();
877                         Promise *promise = new Promise(curr, fv);
878                         value = fv.value;
879                         curr->set_read_from_promise(promise);
880                         promises->push_back(promise);
881                         mo_graph->startChanges();
882                         updated = r_modification_order(curr, promise);
883                         mo_graph->commitChanges();
884                 }
885                 get_thread(curr)->set_return_value(value);
886                 return updated;
887         }
888 }
889
890 /**
891  * Processes a lock, trylock, or unlock model action.  @param curr is
892  * the read model action to process.
893  *
894  * The try lock operation checks whether the lock is taken.  If not,
895  * it falls to the normal lock operation case.  If so, it returns
896  * fail.
897  *
898  * The lock operation has already been checked that it is enabled, so
899  * it just grabs the lock and synchronizes with the previous unlock.
900  *
901  * The unlock operation has to re-enable all of the threads that are
902  * waiting on the lock.
903  *
904  * @return True if synchronization was updated; false otherwise
905  */
906 bool ModelChecker::process_mutex(ModelAction *curr)
907 {
908         std::mutex *mutex = NULL;
909         struct std::mutex_state *state = NULL;
910
911         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
912                 mutex = (std::mutex *)curr->get_location();
913                 state = mutex->get_state();
914         } else if (curr->is_wait()) {
915                 mutex = (std::mutex *)curr->get_value();
916                 state = mutex->get_state();
917         }
918
919         switch (curr->get_type()) {
920         case ATOMIC_TRYLOCK: {
921                 bool success = !state->islocked;
922                 curr->set_try_lock(success);
923                 if (!success) {
924                         get_thread(curr)->set_return_value(0);
925                         break;
926                 }
927                 get_thread(curr)->set_return_value(1);
928         }
929                 //otherwise fall into the lock case
930         case ATOMIC_LOCK: {
931                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
932                         assert_bug("Lock access before initialization");
933                 state->islocked = true;
934                 ModelAction *unlock = get_last_unlock(curr);
935                 //synchronize with the previous unlock statement
936                 if (unlock != NULL) {
937                         curr->synchronize_with(unlock);
938                         return true;
939                 }
940                 break;
941         }
942         case ATOMIC_UNLOCK: {
943                 //unlock the lock
944                 state->islocked = false;
945                 //wake up the other threads
946                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
947                 //activate all the waiting threads
948                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
949                         scheduler->wake(get_thread(*rit));
950                 }
951                 waiters->clear();
952                 break;
953         }
954         case ATOMIC_WAIT: {
955                 //unlock the lock
956                 state->islocked = false;
957                 //wake up the other threads
958                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
959                 //activate all the waiting threads
960                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
961                         scheduler->wake(get_thread(*rit));
962                 }
963                 waiters->clear();
964                 //check whether we should go to sleep or not...simulate spurious failures
965                 if (curr->get_node()->get_misc() == 0) {
966                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
967                         //disable us
968                         scheduler->sleep(get_thread(curr));
969                 }
970                 break;
971         }
972         case ATOMIC_NOTIFY_ALL: {
973                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
974                 //activate all the waiting threads
975                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
976                         scheduler->wake(get_thread(*rit));
977                 }
978                 waiters->clear();
979                 break;
980         }
981         case ATOMIC_NOTIFY_ONE: {
982                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
983                 int wakeupthread = curr->get_node()->get_misc();
984                 action_list_t::iterator it = waiters->begin();
985                 advance(it, wakeupthread);
986                 scheduler->wake(get_thread(*it));
987                 waiters->erase(it);
988                 break;
989         }
990
991         default:
992                 ASSERT(0);
993         }
994         return false;
995 }
996
997 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
998 {
999         /* Do more ambitious checks now that mo is more complete */
1000         if (mo_may_allow(writer, reader)) {
1001                 Node *node = reader->get_node();
1002
1003                 /* Find an ancestor thread which exists at the time of the reader */
1004                 Thread *write_thread = get_thread(writer);
1005                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1006                         write_thread = write_thread->get_parent();
1007
1008                 struct future_value fv = {
1009                         writer->get_value(),
1010                         writer->get_seq_number() + params.maxfuturedelay,
1011                         write_thread->get_id(),
1012                 };
1013                 if (node->add_future_value(fv))
1014                         set_latest_backtrack(reader);
1015         }
1016 }
1017
1018 /**
1019  * Process a write ModelAction
1020  * @param curr The ModelAction to process
1021  * @return True if the mo_graph was updated or promises were resolved
1022  */
1023 bool ModelChecker::process_write(ModelAction *curr)
1024 {
1025         bool updated_mod_order = w_modification_order(curr);
1026         bool updated_promises = resolve_promises(curr);
1027
1028         if (promises->size() == 0) {
1029                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1030                         struct PendingFutureValue pfv = (*futurevalues)[i];
1031                         add_future_value(pfv.writer, pfv.act);
1032                 }
1033                 futurevalues->clear();
1034         }
1035
1036         mo_graph->commitChanges();
1037         mo_check_promises(curr, false);
1038
1039         get_thread(curr)->set_return_value(VALUE_NONE);
1040         return updated_mod_order || updated_promises;
1041 }
1042
1043 /**
1044  * Process a fence ModelAction
1045  * @param curr The ModelAction to process
1046  * @return True if synchronization was updated
1047  */
1048 bool ModelChecker::process_fence(ModelAction *curr)
1049 {
1050         /*
1051          * fence-relaxed: no-op
1052          * fence-release: only log the occurence (not in this function), for
1053          *   use in later synchronization
1054          * fence-acquire (this function): search for hypothetical release
1055          *   sequences
1056          */
1057         bool updated = false;
1058         if (curr->is_acquire()) {
1059                 action_list_t *list = action_trace;
1060                 action_list_t::reverse_iterator rit;
1061                 /* Find X : is_read(X) && X --sb-> curr */
1062                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1063                         ModelAction *act = *rit;
1064                         if (act == curr)
1065                                 continue;
1066                         if (act->get_tid() != curr->get_tid())
1067                                 continue;
1068                         /* Stop at the beginning of the thread */
1069                         if (act->is_thread_start())
1070                                 break;
1071                         /* Stop once we reach a prior fence-acquire */
1072                         if (act->is_fence() && act->is_acquire())
1073                                 break;
1074                         if (!act->is_read())
1075                                 continue;
1076                         /* read-acquire will find its own release sequences */
1077                         if (act->is_acquire())
1078                                 continue;
1079
1080                         /* Establish hypothetical release sequences */
1081                         rel_heads_list_t release_heads;
1082                         get_release_seq_heads(curr, act, &release_heads);
1083                         for (unsigned int i = 0; i < release_heads.size(); i++)
1084                                 if (!curr->synchronize_with(release_heads[i]))
1085                                         set_bad_synchronization();
1086                         if (release_heads.size() != 0)
1087                                 updated = true;
1088                 }
1089         }
1090         return updated;
1091 }
1092
1093 /**
1094  * @brief Process the current action for thread-related activity
1095  *
1096  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
1097  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1098  * synchronization, etc.  This function is a no-op for non-THREAD actions
1099  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1100  *
1101  * @param curr The current action
1102  * @return True if synchronization was updated or a thread completed
1103  */
1104 bool ModelChecker::process_thread_action(ModelAction *curr)
1105 {
1106         bool updated = false;
1107
1108         switch (curr->get_type()) {
1109         case THREAD_CREATE: {
1110                 thrd_t *thrd = (thrd_t *)curr->get_location();
1111                 struct thread_params *params = (struct thread_params *)curr->get_value();
1112                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1113                 add_thread(th);
1114                 th->set_creation(curr);
1115                 /* Promises can be satisfied by children */
1116                 for (unsigned int i = 0; i < promises->size(); i++) {
1117                         Promise *promise = (*promises)[i];
1118                         if (promise->thread_is_available(curr->get_tid()))
1119                                 promise->add_thread(th->get_id());
1120                 }
1121                 break;
1122         }
1123         case THREAD_JOIN: {
1124                 Thread *blocking = curr->get_thread_operand();
1125                 ModelAction *act = get_last_action(blocking->get_id());
1126                 curr->synchronize_with(act);
1127                 updated = true; /* trigger rel-seq checks */
1128                 break;
1129         }
1130         case THREAD_FINISH: {
1131                 Thread *th = get_thread(curr);
1132                 while (!th->wait_list_empty()) {
1133                         ModelAction *act = th->pop_wait_list();
1134                         scheduler->wake(get_thread(act));
1135                 }
1136                 th->complete();
1137                 /* Completed thread can't satisfy promises */
1138                 for (unsigned int i = 0; i < promises->size(); i++) {
1139                         Promise *promise = (*promises)[i];
1140                         if (promise->thread_is_available(th->get_id()))
1141                                 if (promise->eliminate_thread(th->get_id()))
1142                                         priv->failed_promise = true;
1143                 }
1144                 updated = true; /* trigger rel-seq checks */
1145                 break;
1146         }
1147         case THREAD_START: {
1148                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1149                 break;
1150         }
1151         default:
1152                 break;
1153         }
1154
1155         return updated;
1156 }
1157
1158 /**
1159  * @brief Process the current action for release sequence fixup activity
1160  *
1161  * Performs model-checker release sequence fixups for the current action,
1162  * forcing a single pending release sequence to break (with a given, potential
1163  * "loose" write) or to complete (i.e., synchronize). If a pending release
1164  * sequence forms a complete release sequence, then we must perform the fixup
1165  * synchronization, mo_graph additions, etc.
1166  *
1167  * @param curr The current action; must be a release sequence fixup action
1168  * @param work_queue The work queue to which to add work items as they are
1169  * generated
1170  */
1171 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1172 {
1173         const ModelAction *write = curr->get_node()->get_relseq_break();
1174         struct release_seq *sequence = pending_rel_seqs->back();
1175         pending_rel_seqs->pop_back();
1176         ASSERT(sequence);
1177         ModelAction *acquire = sequence->acquire;
1178         const ModelAction *rf = sequence->rf;
1179         const ModelAction *release = sequence->release;
1180         ASSERT(acquire);
1181         ASSERT(release);
1182         ASSERT(rf);
1183         ASSERT(release->same_thread(rf));
1184
1185         if (write == NULL) {
1186                 /**
1187                  * @todo Forcing a synchronization requires that we set
1188                  * modification order constraints. For instance, we can't allow
1189                  * a fixup sequence in which two separate read-acquire
1190                  * operations read from the same sequence, where the first one
1191                  * synchronizes and the other doesn't. Essentially, we can't
1192                  * allow any writes to insert themselves between 'release' and
1193                  * 'rf'
1194                  */
1195
1196                 /* Must synchronize */
1197                 if (!acquire->synchronize_with(release)) {
1198                         set_bad_synchronization();
1199                         return;
1200                 }
1201                 /* Re-check all pending release sequences */
1202                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1203                 /* Re-check act for mo_graph edges */
1204                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1205
1206                 /* propagate synchronization to later actions */
1207                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1208                 for (; (*rit) != acquire; rit++) {
1209                         ModelAction *propagate = *rit;
1210                         if (acquire->happens_before(propagate)) {
1211                                 propagate->synchronize_with(acquire);
1212                                 /* Re-check 'propagate' for mo_graph edges */
1213                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1214                         }
1215                 }
1216         } else {
1217                 /* Break release sequence with new edges:
1218                  *   release --mo--> write --mo--> rf */
1219                 mo_graph->addEdge(release, write);
1220                 mo_graph->addEdge(write, rf);
1221         }
1222
1223         /* See if we have realized a data race */
1224         checkDataRaces();
1225 }
1226
1227 /**
1228  * Initialize the current action by performing one or more of the following
1229  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1230  * in the NodeStack, manipulating backtracking sets, allocating and
1231  * initializing clock vectors, and computing the promises to fulfill.
1232  *
1233  * @param curr The current action, as passed from the user context; may be
1234  * freed/invalidated after the execution of this function, with a different
1235  * action "returned" its place (pass-by-reference)
1236  * @return True if curr is a newly-explored action; false otherwise
1237  */
1238 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1239 {
1240         ModelAction *newcurr;
1241
1242         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1243                 newcurr = process_rmw(*curr);
1244                 delete *curr;
1245
1246                 if (newcurr->is_rmw())
1247                         compute_promises(newcurr);
1248
1249                 *curr = newcurr;
1250                 return false;
1251         }
1252
1253         (*curr)->set_seq_number(get_next_seq_num());
1254
1255         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1256         if (newcurr) {
1257                 /* First restore type and order in case of RMW operation */
1258                 if ((*curr)->is_rmwr())
1259                         newcurr->copy_typeandorder(*curr);
1260
1261                 ASSERT((*curr)->get_location() == newcurr->get_location());
1262                 newcurr->copy_from_new(*curr);
1263
1264                 /* Discard duplicate ModelAction; use action from NodeStack */
1265                 delete *curr;
1266
1267                 /* Always compute new clock vector */
1268                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1269
1270                 *curr = newcurr;
1271                 return false; /* Action was explored previously */
1272         } else {
1273                 newcurr = *curr;
1274
1275                 /* Always compute new clock vector */
1276                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1277
1278                 /* Assign most recent release fence */
1279                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1280
1281                 /*
1282                  * Perform one-time actions when pushing new ModelAction onto
1283                  * NodeStack
1284                  */
1285                 if (newcurr->is_write())
1286                         compute_promises(newcurr);
1287                 else if (newcurr->is_relseq_fixup())
1288                         compute_relseq_breakwrites(newcurr);
1289                 else if (newcurr->is_wait())
1290                         newcurr->get_node()->set_misc_max(2);
1291                 else if (newcurr->is_notify_one()) {
1292                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1293                 }
1294                 return true; /* This was a new ModelAction */
1295         }
1296 }
1297
1298 /**
1299  * @brief Establish reads-from relation between two actions
1300  *
1301  * Perform basic operations involved with establishing a concrete rf relation,
1302  * including setting the ModelAction data and checking for release sequences.
1303  *
1304  * @param act The action that is reading (must be a read)
1305  * @param rf The action from which we are reading (must be a write)
1306  *
1307  * @return True if this read established synchronization
1308  */
1309 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1310 {
1311         act->set_read_from(rf);
1312         if (rf != NULL && act->is_acquire()) {
1313                 rel_heads_list_t release_heads;
1314                 get_release_seq_heads(act, act, &release_heads);
1315                 int num_heads = release_heads.size();
1316                 for (unsigned int i = 0; i < release_heads.size(); i++)
1317                         if (!act->synchronize_with(release_heads[i])) {
1318                                 set_bad_synchronization();
1319                                 num_heads--;
1320                         }
1321                 return num_heads > 0;
1322         }
1323         return false;
1324 }
1325
1326 /**
1327  * Check promises and eliminate potentially-satisfying threads when a thread is
1328  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1329  * no longer satisfy a promise generated from that thread.
1330  *
1331  * @param blocker The thread on which a thread is waiting
1332  * @param waiting The waiting thread
1333  */
1334 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1335 {
1336         for (unsigned int i = 0; i < promises->size(); i++) {
1337                 Promise *promise = (*promises)[i];
1338                 ModelAction *reader = promise->get_action();
1339                 if (reader->get_tid() != blocker->get_id())
1340                         continue;
1341                 if (!promise->thread_is_available(waiting->get_id()))
1342                         continue;
1343                 if (promise->eliminate_thread(waiting->get_id())) {
1344                         /* Promise has failed */
1345                         priv->failed_promise = true;
1346                 }
1347         }
1348 }
1349
1350 /**
1351  * @brief Check whether a model action is enabled.
1352  *
1353  * Checks whether a lock or join operation would be successful (i.e., is the
1354  * lock already locked, or is the joined thread already complete). If not, put
1355  * the action in a waiter list.
1356  *
1357  * @param curr is the ModelAction to check whether it is enabled.
1358  * @return a bool that indicates whether the action is enabled.
1359  */
1360 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1361         if (curr->is_lock()) {
1362                 std::mutex *lock = (std::mutex *)curr->get_location();
1363                 struct std::mutex_state *state = lock->get_state();
1364                 if (state->islocked) {
1365                         //Stick the action in the appropriate waiting queue
1366                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1367                         return false;
1368                 }
1369         } else if (curr->get_type() == THREAD_JOIN) {
1370                 Thread *blocking = (Thread *)curr->get_location();
1371                 if (!blocking->is_complete()) {
1372                         blocking->push_wait_list(curr);
1373                         thread_blocking_check_promises(blocking, get_thread(curr));
1374                         return false;
1375                 }
1376         }
1377
1378         return true;
1379 }
1380
1381 /**
1382  * This is the heart of the model checker routine. It performs model-checking
1383  * actions corresponding to a given "current action." Among other processes, it
1384  * calculates reads-from relationships, updates synchronization clock vectors,
1385  * forms a memory_order constraints graph, and handles replay/backtrack
1386  * execution when running permutations of previously-observed executions.
1387  *
1388  * @param curr The current action to process
1389  * @return The ModelAction that is actually executed; may be different than
1390  * curr; may be NULL, if the current action is not enabled to run
1391  */
1392 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1393 {
1394         ASSERT(curr);
1395         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1396
1397         if (!check_action_enabled(curr)) {
1398                 /* Make the execution look like we chose to run this action
1399                  * much later, when a lock/join can succeed */
1400                 get_thread(curr)->set_pending(curr);
1401                 scheduler->sleep(get_thread(curr));
1402                 return NULL;
1403         }
1404
1405         bool newly_explored = initialize_curr_action(&curr);
1406
1407         DBG();
1408         if (DBG_ENABLED())
1409                 curr->print();
1410
1411         wake_up_sleeping_actions(curr);
1412
1413         /* Add the action to lists before any other model-checking tasks */
1414         if (!second_part_of_rmw)
1415                 add_action_to_lists(curr);
1416
1417         /* Build may_read_from set for newly-created actions */
1418         if (newly_explored && curr->is_read())
1419                 build_may_read_from(curr);
1420
1421         /* Initialize work_queue with the "current action" work */
1422         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1423         while (!work_queue.empty() && !has_asserted()) {
1424                 WorkQueueEntry work = work_queue.front();
1425                 work_queue.pop_front();
1426
1427                 switch (work.type) {
1428                 case WORK_CHECK_CURR_ACTION: {
1429                         ModelAction *act = work.action;
1430                         bool update = false; /* update this location's release seq's */
1431                         bool update_all = false; /* update all release seq's */
1432
1433                         if (process_thread_action(curr))
1434                                 update_all = true;
1435
1436                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1437                                 update = true;
1438
1439                         if (act->is_write() && process_write(act))
1440                                 update = true;
1441
1442                         if (act->is_fence() && process_fence(act))
1443                                 update_all = true;
1444
1445                         if (act->is_mutex_op() && process_mutex(act))
1446                                 update_all = true;
1447
1448                         if (act->is_relseq_fixup())
1449                                 process_relseq_fixup(curr, &work_queue);
1450
1451                         if (update_all)
1452                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1453                         else if (update)
1454                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1455                         break;
1456                 }
1457                 case WORK_CHECK_RELEASE_SEQ:
1458                         resolve_release_sequences(work.location, &work_queue);
1459                         break;
1460                 case WORK_CHECK_MO_EDGES: {
1461                         /** @todo Complete verification of work_queue */
1462                         ModelAction *act = work.action;
1463                         bool updated = false;
1464
1465                         if (act->is_read()) {
1466                                 const ModelAction *rf = act->get_reads_from();
1467                                 const Promise *promise = act->get_reads_from_promise();
1468                                 if (rf) {
1469                                         if (r_modification_order(act, rf))
1470                                                 updated = true;
1471                                 } else if (promise) {
1472                                         if (r_modification_order(act, promise))
1473                                                 updated = true;
1474                                 }
1475                         }
1476                         if (act->is_write()) {
1477                                 if (w_modification_order(act))
1478                                         updated = true;
1479                         }
1480                         mo_graph->commitChanges();
1481
1482                         if (updated)
1483                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1484                         break;
1485                 }
1486                 default:
1487                         ASSERT(false);
1488                         break;
1489                 }
1490         }
1491
1492         check_curr_backtracking(curr);
1493         set_backtracking(curr);
1494         return curr;
1495 }
1496
1497 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1498 {
1499         Node *currnode = curr->get_node();
1500         Node *parnode = currnode->get_parent();
1501
1502         if ((parnode && !parnode->backtrack_empty()) ||
1503                          !currnode->misc_empty() ||
1504                          !currnode->read_from_past_empty() ||
1505                          !currnode->future_value_empty() ||
1506                          !currnode->promise_empty() ||
1507                          !currnode->relseq_break_empty()) {
1508                 set_latest_backtrack(curr);
1509         }
1510 }
1511
1512 bool ModelChecker::promises_expired() const
1513 {
1514         for (unsigned int i = 0; i < promises->size(); i++) {
1515                 Promise *promise = (*promises)[i];
1516                 if (promise->get_expiration() < priv->used_sequence_numbers)
1517                         return true;
1518         }
1519         return false;
1520 }
1521
1522 /**
1523  * This is the strongest feasibility check available.
1524  * @return whether the current trace (partial or complete) must be a prefix of
1525  * a feasible trace.
1526  */
1527 bool ModelChecker::isfeasibleprefix() const
1528 {
1529         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1530 }
1531
1532 /**
 * Print diagnostic information about an infeasible execution
1534  * @param prefix A string to prefix the output with; if NULL, then a default
1535  * message prefix will be provided
1536  */
1537 void ModelChecker::print_infeasibility(const char *prefix) const
1538 {
1539         char buf[100];
1540         char *ptr = buf;
1541         if (mo_graph->checkForCycles())
1542                 ptr += sprintf(ptr, "[mo cycle]");
1543         if (priv->failed_promise)
1544                 ptr += sprintf(ptr, "[failed promise]");
1545         if (priv->too_many_reads)
1546                 ptr += sprintf(ptr, "[too many reads]");
1547         if (priv->no_valid_reads)
1548                 ptr += sprintf(ptr, "[no valid reads-from]");
1549         if (priv->bad_synchronization)
1550                 ptr += sprintf(ptr, "[bad sw ordering]");
1551         if (promises_expired())
1552                 ptr += sprintf(ptr, "[promise expired]");
1553         if (promises->size() != 0)
1554                 ptr += sprintf(ptr, "[unresolved promise]");
1555         if (ptr != buf)
1556                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1557 }
1558
1559 /**
1560  * Returns whether the current completed trace is feasible, except for pending
1561  * release sequences.
1562  */
1563 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1564 {
1565         return !is_infeasible() && promises->size() == 0;
1566 }
1567
1568 /**
1569  * Check if the current partial trace is infeasible. Does not check any
1570  * end-of-execution flags, which might rule out the execution. Thus, this is
1571  * useful only for ruling an execution as infeasible.
1572  * @return whether the current partial trace is infeasible.
1573  */
1574 bool ModelChecker::is_infeasible() const
1575 {
1576         return mo_graph->checkForCycles() ||
1577                 priv->no_valid_reads ||
1578                 priv->failed_promise ||
1579                 priv->too_many_reads ||
1580                 priv->bad_synchronization ||
1581                 promises_expired();
1582 }
1583
1584 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1585 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1586         ModelAction *lastread = get_last_action(act->get_tid());
1587         lastread->process_rmw(act);
1588         if (act->is_rmw()) {
1589                 if (lastread->get_reads_from())
1590                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1591                 else
1592                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1593                 mo_graph->commitChanges();
1594         }
1595         return lastread;
1596 }
1597
1598 /**
1599  * Checks whether a thread has read from the same write for too many times
1600  * without seeing the effects of a later write.
1601  *
1602  * Basic idea:
 * 1) there must be a different write that we could read from that would satisfy the modification order,
1604  * 2) we must have read from the same value in excess of maxreads times, and
1605  * 3) that other write must have been in the reads_from set for maxreads times.
1606  *
1607  * If so, we decide that the execution is no longer feasible.
1608  */
void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
{
	/* Feature is disabled when maxreads == 0 */
	if (params.maxreads != 0) {
		/* With at most one read-from candidate, there is no
		 * alternative write to switch to */
		if (curr->get_node()->get_read_from_past_size() <= 1)
			return;
		//Must make sure that execution is currently feasible...  We could
		//accidentally clear by rolling back
		if (is_infeasible())
			return;
		std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
		int tid = id_to_int(curr->get_tid());

		/* Skip checks */
		if ((int)thrd_lists->size() <= tid)
			return;
		action_list_t *list = &(*thrd_lists)[tid];

		/* Walk backwards (most recent first) through this thread's
		 * actions at curr's location */
		action_list_t::reverse_iterator rit = list->rbegin();
		/* Skip past curr */
		for (; (*rit) != curr; rit++)
			;
		/* go past curr now */
		rit++;

		/* Remember this position; the second phase re-walks the same
		 * window of actions */
		action_list_t::reverse_iterator ritcopy = rit;
		//See if we have enough reads from the same value
		int count = 0;
		for (; count < params.maxreads; rit++, count++) {
			if (rit == list->rend())
				return;
			ModelAction *act = *rit;
			/* Any non-read, a read from a different write, or a
			 * read with no alternative candidates breaks the
			 * "same write, maxreads times" streak */
			if (!act->is_read())
				return;

			if (act->get_reads_from() != rf)
				return;
			if (act->get_node()->get_read_from_past_size() <= 1)
				return;
		}
		/* Phase 2: look for a different write that all of those reads
		 * could have read from instead */
		for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
			/* Get write */
			const ModelAction *write = curr->get_node()->get_read_from_past(i);

			/* Need a different write */
			if (write == rf)
				continue;

			/* Test to see whether this is a feasible write to read from */
			/** NOTE: all members of read-from set should be
			 *  feasible, so we no longer check it here **/

			rit = ritcopy;

			bool feasiblewrite = true;
			//now we need to see if this write works for everyone

			for (int loop = count; loop > 0; loop--, rit++) {
				ModelAction *act = *rit;
				bool foundvalue = false;
				/* 'write' must appear in this read's own
				 * read-from-past candidate set */
				for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
					if (act->get_node()->get_read_from_past(j) == write) {
						foundvalue = true;
						break;
					}
				}
				if (!foundvalue) {
					feasiblewrite = false;
					break;
				}
			}
			/* Every read could have seen 'write' instead, yet none
			 * did for maxreads iterations: declare the execution
			 * infeasible */
			if (feasiblewrite) {
				priv->too_many_reads = true;
				return;
			}
		}
	}
}
1686
1687 /**
1688  * Updates the mo_graph with the constraints imposed from the current
1689  * read.
1690  *
1691  * Basic idea is the following: Go through each other thread and find
1692  * the last action that happened before our read.  Two cases:
1693  *
1694  * (1) The action is a write => that write must either occur before
1695  * the write we read from or be the write we read from.
1696  *
1697  * (2) The action is a read => the write that that action read from
1698  * must occur before the write we read from or be the same write.
1699  *
1700  * @param curr The current action. Must be a read.
1701  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1702  * @return True if modification order edges were added; false otherwise
1703  */
template <typename rf_type>
bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* SC-fence rules: order earlier writes before rf.
			 * Each clause also terminates the scan of this
			 * thread, since older actions are ordered
			 * transitively. */
			if (act->is_write() && !act->equals(rf) && act != curr) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr. Don't consider reflexively.
			 */
			if (act->happens_before(curr) && act != curr) {
				if (act->is_write()) {
					/* Case (1): a write hb-before curr must
					 * be mo-before the write we read from */
					if (!act->equals(rf)) {
						added = mo_graph->addEdge(act, rf) || added;
					}
				} else {
					/* Case (2): a read hb-before curr — the
					 * write (or promise) it read from must
					 * be mo-before rf.
					 * NOTE(review): assumes a read has
					 * either a reads-from action or a
					 * reads-from promise set here */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, (*promises)[i]) || added;

	return added;
}
1788
1789 /**
1790  * Updates the mo_graph with the constraints imposed from the current write.
1791  *
1792  * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
1794  *
1795  * (1) The action is a write => that write must occur before
1796  * the current write
1797  *
1798  * (2) The action is a read => the write that that action read from
1799  * must occur before the current write.
1800  *
1801  * This method also handles two other issues:
1802  *
1803  * (I) Sequential Consistency: Making sure that if the current write is
1804  * seq_cst, that it occurs after the previous seq_cst write.
1805  *
1806  * (II) Sending the write back to non-synchronizing reads.
1807  *
1808  * @param curr The current action. Must be a write.
1809  * @return True if modification order edges were added; false otherwise
1810  */
bool ModelChecker::w_modification_order(ModelAction *curr)
{
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should add
				 * whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
						continue;
					added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */
				if (thin_air_constraint_may_allow(curr, act)) {
					if (!is_infeasible())
						futurevalues->push_back(PendingFutureValue(curr, act));
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, (*promises)[i]) || added;

	return added;
}
1925
1926 /** Arbitrary reads from the future are not allowed.  Section 29.3
 * part 9 places some constraints.  This method checks one result of that
 * constraint.  Others require compiler support. */
1929 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1930 {
1931         if (!writer->is_rmw())
1932                 return true;
1933
1934         if (!reader->is_rmw())
1935                 return true;
1936
1937         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1938                 if (search == reader)
1939                         return false;
1940                 if (search->get_tid() == reader->get_tid() &&
1941                                 search->happens_before(reader))
1942                         break;
1943         }
1944
1945         return true;
1946 }
1947
1948 /**
1949  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one of the following constraints (others
1951  * require compiler support):
1952  *
1953  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1954  */
1955 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1956 {
1957         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1958         unsigned int i;
1959         /* Iterate over all threads */
1960         for (i = 0; i < thrd_lists->size(); i++) {
1961                 const ModelAction *write_after_read = NULL;
1962
1963                 /* Iterate over actions in thread, starting from most recent */
1964                 action_list_t *list = &(*thrd_lists)[i];
1965                 action_list_t::reverse_iterator rit;
1966                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1967                         ModelAction *act = *rit;
1968
1969                         /* Don't disallow due to act == reader */
1970                         if (!reader->happens_before(act) || reader == act)
1971                                 break;
1972                         else if (act->is_write())
1973                                 write_after_read = act;
1974                         else if (act->is_read() && act->get_reads_from() != NULL)
1975                                 write_after_read = act->get_reads_from();
1976                 }
1977
1978                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1979                         return false;
1980         }
1981         return true;
1982 }
1983
1984 /**
1985  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1986  * The ModelAction under consideration is expected to be taking part in
1987  * release/acquire synchronization as an object of the "reads from" relation.
1988  * Note that this can only provide release sequence support for RMW chains
1989  * which do not read from the future, as those actions cannot be traced until
1990  * their "promise" is fulfilled. Similarly, we may not even establish the
1991  * presence of a release sequence with certainty, as some modification order
1992  * constraints may be decided further in the future. Thus, this function
1993  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1994  * and a boolean representing certainty.
1995  *
1996  * @param rf The action that might be part of a release sequence. Must be a
1997  * write.
1998  * @param release_heads A pass-by-reference style return parameter. After
1999  * execution of this function, release_heads will contain the heads of all the
2000  * relevant release sequences, if any exists with certainty
2001  * @param pending A pass-by-reference style return parameter which is only used
2002  * when returning false (i.e., uncertain). Returns most information regarding
2003  * an uncertain release sequence, including any write operations that might
2004  * break the sequence.
2005  * @return true, if the ModelChecker is certain that release_heads is complete;
2006  * false otherwise
2007  */
bool ModelChecker::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backwards through the RMW chain that rf is part of,
	 * collecting release heads along the way */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that could slot between
	 * 'release' and 'rf' in modification order and break the sequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		/* Contiguous subsequence established: 'release' heads the
		 * release sequence that rf belongs to */
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record enough state to re-check this sequence lazily */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2151
2152 /**
2153  * An interface for getting the release sequence head(s) with which a
2154  * given ModelAction must synchronize. This function only returns a non-empty
2155  * result when it can locate a release sequence head with certainty. Otherwise,
2156  * it may mark the internal state of the ModelChecker so that it will handle
2157  * the release sequence at a later time, causing @a acquire to update its
2158  * synchronization at some later point in execution.
2159  *
2160  * @param acquire The 'acquire' action that may synchronize with a release
2161  * sequence
2162  * @param read The read action that may read from a release sequence; this may
2163  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2164  * when 'acquire' is a fence-acquire)
2165  * @param release_heads A pass-by-reference return parameter. Will be filled
 * with the head(s) of the release sequence(s), if they exist with certainty.
2167  * @see ModelChecker::release_seq_heads
2168  */
2169 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2170                 ModelAction *read, rel_heads_list_t *release_heads)
2171 {
2172         const ModelAction *rf = read->get_reads_from();
2173         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2174         sequence->acquire = acquire;
2175         sequence->read = read;
2176
2177         if (!release_seq_heads(rf, release_heads, sequence)) {
2178                 /* add act to 'lazy checking' list */
2179                 pending_rel_seqs->push_back(sequence);
2180         } else {
2181                 snapshot_free(sequence);
2182         }
2183 }
2184
2185 /**
2186  * Attempt to resolve all stashed operations that might synchronize with a
2187  * release sequence for a given location. This implements the "lazy" portion of
2188  * determining whether or not a release sequence was contiguous, since not all
2189  * modification order information is present at the time an action occurs.
2190  *
2191  * @param location The location/object that should be checked for release
2192  * sequence resolutions. A NULL value means to check all locations.
2193  * @param work_queue The work queue to which to add work items as they are
2194  * generated
2195  * @return True if any updates occurred (new synchronization, new mo_graph
2196  * edges)
2197  */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-run the release-sequence computation with the (possibly
		 * richer) modification-order information now available */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with each newly-certain head */
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!acquire->has_synchronized_with(release_heads[i])) {
				if (acquire->synchronize_with(release_heads[i]))
					updated = true;
				else
					/* Synchronization would violate ordering */
					set_bad_synchronization();
			}
		}

		/* NOTE: 'updated' is cumulative across the while-loop, so once
		 * any synchronization occurs these work items are re-queued on
		 * every subsequent iteration as well */
		if (updated) {
			/* Re-check all pending release sequences */
			work_queue->push_back(CheckRelSeqWorkEntry(NULL));
			/* Re-check read-acquire for mo_graph edges */
			if (acquire->is_read())
				work_queue->push_back(MOEdgeWorkEntry(acquire));

			/* propagate synchronization to later actions */
			action_list_t::reverse_iterator rit = action_trace->rbegin();
			for (; (*rit) != acquire; rit++) {
				ModelAction *propagate = *rit;
				if (acquire->happens_before(propagate)) {
					propagate->synchronize_with(acquire);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));
				}
			}
		}
		/* A 'complete' sequence no longer needs lazy re-checking */
		if (complete) {
			it = pending_rel_seqs->erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2257
2258 /**
2259  * Performs various bookkeeping operations for the current ModelAction. For
2260  * instance, adds action to the per-object, per-thread action vector and to the
2261  * action trace list of all thread actions.
2262  *
2263  * @param act is the ModelAction to add.
2264  */
void ModelChecker::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
	/* First action on an atomic variable: synthesize an ATOMIC_UNINIT
	 * action so reads may observe the "uninitialized" store */
	if (list->empty() && act->is_atomic_var()) {
		uninit = new_uninitialized_action(act->get_location());
		uninit_id = id_to_int(uninit->get_tid());
		list->push_back(uninit);
	}
	list->push_back(act);

	action_trace->push_back(act);
	/* The uninit action precedes everything in the global trace */
	if (uninit)
		action_trace->push_front(uninit);

	/* Per-location, per-thread action lists */
	std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Track the most recent action per thread */
	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;
	if (uninit)
		(*thrd_last_action)[uninit_id] = uninit;

	/* Track the most recent release fence per thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release->size() <= tid)
			thrd_last_fence_release->resize(get_num_threads());
		(*thrd_last_fence_release)[tid] = act;
	}

	/* A wait also operates on its mutex: index it under the mutex
	 * location as well (the mutex address is stashed in the value) */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);

		std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2311
2312 /**
2313  * @brief Get the last action performed by a particular Thread
2314  * @param tid The thread ID of the Thread in question
2315  * @return The last action in the thread
2316  */
2317 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2318 {
2319         int threadid = id_to_int(tid);
2320         if (threadid < (int)thrd_last_action->size())
2321                 return (*thrd_last_action)[id_to_int(tid)];
2322         else
2323                 return NULL;
2324 }
2325
2326 /**
2327  * @brief Get the last fence release performed by a particular Thread
2328  * @param tid The thread ID of the Thread in question
2329  * @return The last fence release in the thread, if one exists; NULL otherwise
2330  */
2331 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2332 {
2333         int threadid = id_to_int(tid);
2334         if (threadid < (int)thrd_last_fence_release->size())
2335                 return (*thrd_last_fence_release)[id_to_int(tid)];
2336         else
2337                 return NULL;
2338 }
2339
2340 /**
2341  * Gets the last memory_order_seq_cst write (in the total global sequence)
2342  * performed on a particular object (i.e., memory location), not including the
2343  * current action.
2344  * @param curr The current ModelAction; also denotes the object location to
2345  * check
2346  * @return The last seq_cst write
2347  */
2348 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2349 {
2350         void *location = curr->get_location();
2351         action_list_t *list = get_safe_ptr_action(obj_map, location);
2352         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2353         action_list_t::reverse_iterator rit;
2354         for (rit = list->rbegin(); rit != list->rend(); rit++)
2355                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2356                         return *rit;
2357         return NULL;
2358 }
2359
2360 /**
2361  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2362  * performed in a particular thread, prior to a particular fence.
2363  * @param tid The ID of the thread to check
2364  * @param before_fence The fence from which to begin the search; if NULL, then
2365  * search for the most recent fence in the thread.
2366  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2367  */
2368 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2369 {
2370         /* All fences should have NULL location */
2371         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2372         action_list_t::reverse_iterator rit = list->rbegin();
2373
2374         if (before_fence) {
2375                 for (; rit != list->rend(); rit++)
2376                         if (*rit == before_fence)
2377                                 break;
2378
2379                 ASSERT(*rit == before_fence);
2380                 rit++;
2381         }
2382
2383         for (; rit != list->rend(); rit++)
2384                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2385                         return *rit;
2386         return NULL;
2387 }
2388
2389 /**
2390  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2391  * location). This function identifies the mutex according to the current
2392  * action, which is presumed to perform on the same mutex.
2393  * @param curr The current ModelAction; also denotes the object location to
2394  * check
2395  * @return The last unlock operation
2396  */
2397 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2398 {
2399         void *location = curr->get_location();
2400         action_list_t *list = get_safe_ptr_action(obj_map, location);
2401         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2402         action_list_t::reverse_iterator rit;
2403         for (rit = list->rbegin(); rit != list->rend(); rit++)
2404                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2405                         return *rit;
2406         return NULL;
2407 }
2408
2409 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2410 {
2411         ModelAction *parent = get_last_action(tid);
2412         if (!parent)
2413                 parent = get_thread(tid)->get_creation();
2414         return parent;
2415 }
2416
2417 /**
2418  * Returns the clock vector for a given thread.
2419  * @param tid The thread whose clock vector we want
2420  * @return Desired clock vector
2421  */
2422 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2423 {
2424         return get_parent_action(tid)->get_cv();
2425 }
2426
2427 /**
2428  * Resolve a set of Promises with a current write. The set is provided in the
2429  * Node corresponding to @a write.
2430  * @param write The ModelAction that is fulfilling Promises
2431  * @return True if promises were resolved; false otherwise
2432  */
bool ModelChecker::resolve_promises(ModelAction *write)
{
	bool haveResolved = false;
	std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
	promise_list_t mustResolve, resolved;

	/* 'i' indexes the Node's promise set; 'promise_index' tracks our
	 * position in the promises vector, which shifts as we erase */
	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			/* This promise is selected for resolution by 'write' */
			ModelAction *read = promise->get_action();
			read_from(read, write);
			//Make sure the promise's value matches the write's value
			ASSERT(promise->is_compatible(write));
			mo_graph->resolvePromise(promise, write, &mustResolve);

			resolved.push_back(promise);
			promises->erase(promises->begin() + promise_index);
			actions_to_check.push_back(read);

			haveResolved = true;
		} else
			promise_index++;
	}

	/* Any promise the mo_graph required us to resolve, but which we
	 * did not, marks this execution as a failed-promise execution */
	for (unsigned int i = 0; i < mustResolve.size(); i++) {
		if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
				== resolved.end())
			priv->failed_promise = true;
	}
	for (unsigned int i = 0; i < resolved.size(); i++)
		delete resolved[i];
	//Check whether reading these writes has made threads unable to
	//resolve promises

	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	return haveResolved;
}
2474
2475 /**
2476  * Compute the set of promises that could potentially be satisfied by this
2477  * action. Note that the set computation actually appears in the Node, not in
2478  * ModelChecker.
2479  * @param curr The ModelAction that may satisfy promises
2480  */
2481 void ModelChecker::compute_promises(ModelAction *curr)
2482 {
2483         for (unsigned int i = 0; i < promises->size(); i++) {
2484                 Promise *promise = (*promises)[i];
2485                 const ModelAction *act = promise->get_action();
2486                 ASSERT(act->is_read());
2487                 if (!act->happens_before(curr) &&
2488                                 !act->could_synchronize_with(curr) &&
2489                                 promise->is_compatible(curr) &&
2490                                 promise->get_value() == curr->get_value()) {
2491                         curr->get_node()->set_promise(i, act->is_rmw());
2492                 }
2493         }
2494 }
2495
2496 /** Checks promises in response to change in ClockVector Threads. */
2497 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2498 {
2499         for (unsigned int i = 0; i < promises->size(); i++) {
2500                 Promise *promise = (*promises)[i];
2501                 const ModelAction *act = promise->get_action();
2502                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2503                                 merge_cv->synchronized_since(act)) {
2504                         if (promise->eliminate_thread(tid)) {
2505                                 //Promise has failed
2506                                 priv->failed_promise = true;
2507                                 return;
2508                         }
2509                 }
2510         }
2511 }
2512
2513 void ModelChecker::check_promises_thread_disabled()
2514 {
2515         for (unsigned int i = 0; i < promises->size(); i++) {
2516                 Promise *promise = (*promises)[i];
2517                 if (promise->has_failed()) {
2518                         priv->failed_promise = true;
2519                         return;
2520                 }
2521         }
2522 }
2523
2524 /**
2525  * @brief Checks promises in response to addition to modification order for
2526  * threads.
2527  *
2528  * We test whether threads are still available for satisfying promises after an
2529  * addition to our modification order constraints. Those that are unavailable
2530  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2531  * that promise has failed.
2532  *
2533  * @param act The ModelAction which updated the modification order
2534  * @param is_read_check Should be true if act is a read and we must check for
2535  * updates to the store from which it read (there is a distinction here for
2536  * RMW's, which are both a load and a store)
2537  */
void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read-check, test against the store 'act' read from;
	 * otherwise test against 'act' itself (the store half of a RMW) */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *pread = promise->get_action();

		// Is this promise on the same location?
		if (!pread->same_var(write))
			continue;

		/* The promised read happens-before act; check whether this
		 * modification-order constraint dooms the promise */
		if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
			priv->failed_promise = true;
			return;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->failed_promise = true;
				return;
			}
		}
	}
}
2567
2568 /**
2569  * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
2571  * calculations.
2572  *
2573  * @param curr The current ModelAction. Must be a release sequence fixup
2574  * action.
2575  */
2576 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2577 {
2578         if (pending_rel_seqs->empty())
2579                 return;
2580
2581         struct release_seq *pending = pending_rel_seqs->back();
2582         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2583                 const ModelAction *write = pending->writes[i];
2584                 curr->get_node()->add_relseq_break(write);
2585         }
2586
2587         /* NULL means don't break the sequence; just synchronize */
2588         curr->get_node()->add_relseq_break(NULL);
2589 }
2590
2591 /**
2592  * Build up an initial set of all past writes that this 'read' action may read
2593  * from, as well as any previously-observed future values that must still be valid.
2594  *
2595  * @param curr is the current ModelAction that we are exploring; it must be a
2596  * 'read' operation.
2597  */
void ModelChecker::build_may_read_from(ModelAction *curr)
{
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	/* For a seq_cst read, the last seq_cst write constrains which
	 * earlier writes remain visible */
	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: trial-apply the mo_graph
				 * edges, test feasibility, then roll them back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises->size(); i++) {
		const Promise *promise = (*promises)[i];
		const ModelAction *promise_read = promise->get_action();
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values (same trial/rollback) */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible()) {
				const struct future_value fv = promise->get_fv();
				curr->get_node()->add_future_value(fv);
			}
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->get_read_from_past_size() && curr->get_node()->future_value_empty()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2674
2675 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2676 {
2677         for ( ; write != NULL; write = write->get_reads_from()) {
2678                 /* UNINIT actions don't have a Node, and they never sleep */
2679                 if (write->is_uninitialized())
2680                         return true;
2681                 Node *prevnode = write->get_node()->get_parent();
2682
2683                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2684                 if (write->is_release() && thread_sleep)
2685                         return true;
2686                 if (!write->is_rmw())
2687                         return false;
2688         }
2689         return true;
2690 }
2691
2692 /**
2693  * @brief Create a new action representing an uninitialized atomic
2694  * @param location The memory location of the atomic object
2695  * @return A pointer to a new ModelAction
2696  */
2697 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2698 {
2699         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2700         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2701         act->create_cv(NULL);
2702         return act;
2703 }
2704
2705 static void print_list(action_list_t *list)
2706 {
2707         action_list_t::iterator it;
2708
2709         model_print("---------------------------------------------------------------------\n");
2710
2711         unsigned int hash = 0;
2712
2713         for (it = list->begin(); it != list->end(); it++) {
2714                 (*it)->print();
2715                 hash = hash^(hash<<3)^((*it)->hash());
2716         }
2717         model_print("HASH %u\n", hash);
2718         model_print("---------------------------------------------------------------------\n");
2719 }
2720
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump reads-from and sequenced-before relations, plus the
 * modification-order graph, to a Graphviz "dot" file
 * @param filename Output basename; ".dot" is appended
 */
void ModelChecker::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf: a long filename must not overflow the fixed buffer
	 * (the original used unbounded sprintf) */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return; /* can't open output; skip the dump rather than crash */
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Last action seen per thread, for drawing sequenced-before edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action = *it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
			if (action->get_reads_from() != NULL)
				fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		}
		/* sb edge from the thread's previous action, if any */
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
		}

		thread_array[action->get_tid()] = action;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2749
2750 /** @brief Prints an execution trace summary. */
2751 void ModelChecker::print_summary() const
2752 {
2753 #if SUPPORT_MOD_ORDER_DUMP
2754         char buffername[100];
2755         sprintf(buffername, "exec%04u", stats.num_total);
2756         mo_graph->dumpGraphToFile(buffername);
2757         sprintf(buffername, "graph%04u", stats.num_total);
2758         dumpGraph(buffername);
2759 #endif
2760
2761         model_print("Execution %d:", stats.num_total);
2762         if (isfeasibleprefix())
2763                 model_print("\n");
2764         else
2765                 print_infeasibility(" INFEASIBLE");
2766         print_list(action_trace);
2767         model_print("\n");
2768 }
2769
2770 /**
2771  * Add a Thread to the system for the first time. Should only be called once
2772  * per thread.
2773  * @param t The Thread to add
2774  */
2775 void ModelChecker::add_thread(Thread *t)
2776 {
2777         thread_map->put(id_to_int(t->get_id()), t);
2778         scheduler->add_thread(t);
2779 }
2780
2781 /**
2782  * Removes a thread from the scheduler.
 * @param t The Thread to remove.
2784  */
void ModelChecker::remove_thread(Thread *t)
{
	/* Only the scheduler drops the thread; thread_map keeps its entry */
	scheduler->remove_thread(t);
}
2789
2790 /**
2791  * @brief Get a Thread reference by its ID
2792  * @param tid The Thread's ID
2793  * @return A Thread reference
2794  */
2795 Thread * ModelChecker::get_thread(thread_id_t tid) const
2796 {
2797         return thread_map->get(id_to_int(tid));
2798 }
2799
2800 /**
2801  * @brief Get a reference to the Thread in which a ModelAction was executed
2802  * @param act The ModelAction
2803  * @return A Thread reference
2804  */
2805 Thread * ModelChecker::get_thread(const ModelAction *act) const
2806 {
2807         return get_thread(act->get_tid());
2808 }
2809
2810 /**
2811  * @brief Check if a Thread is currently enabled
2812  * @param t The Thread to check
2813  * @return True if the Thread is currently enabled
2814  */
bool ModelChecker::is_enabled(Thread *t) const
{
	/* Delegates to the Scheduler, which owns the enabled set */
	return scheduler->is_enabled(t);
}
2819
2820 /**
2821  * @brief Check if a Thread is currently enabled
2822  * @param tid The ID of the Thread to check
2823  * @return True if the Thread is currently enabled
2824  */
bool ModelChecker::is_enabled(thread_id_t tid) const
{
	/* Delegates to the Scheduler, which owns the enabled set */
	return scheduler->is_enabled(tid);
}
2829
2830 /**
2831  * Switch from a model-checker context to a user-thread context. This is the
2832  * complement of ModelChecker::switch_to_master and must be called from the
2833  * model-checker context
2834  *
2835  * @param thread The user-thread to switch to
2836  */
void ModelChecker::switch_from_master(Thread *thread)
{
	/* Mark 'thread' as running before transferring control */
	scheduler->set_current_thread(thread);
	/* Context-switch: save the system context, resume the user thread */
	Thread::swap(&system_context, thread);
}
2842
2843 /**
2844  * Switch from a user-context to the "master thread" context (a.k.a. system
2845  * context). This switch is made with the intention of exploring a particular
2846  * model-checking action (described by a ModelAction object). Must be called
2847  * from a user-thread context.
2848  *
2849  * @param act The current action that will be explored. May be NULL only if
2850  * trace is exiting via an assertion (see ModelChecker::set_assert and
2851  * ModelChecker::has_asserted).
2852  * @return Return the value returned by the current action
2853  */
uint64_t ModelChecker::switch_to_master(ModelAction *act)
{
	DBG();
	Thread *old = thread_current();
	/* A thread may only have one pending action at a time */
	ASSERT(!old->get_pending());
	old->set_pending(act);
	/* Context-switch into the model-checker ("master") context; we
	 * resume here once the scheduler picks this thread again */
	if (Thread::swap(old, &system_context) < 0) {
		perror("swap threads");
		exit(EXIT_FAILURE);
	}
	/* The return value was computed while this thread was swapped out */
	return old->get_return_value();
}
2866
2867 /**
2868  * Takes the next step in the execution, if possible.
2869  * @param curr The current step to take
2870  * @return Returns the next Thread to run, if any; NULL if this execution
2871  * should terminate
2872  */
Thread * ModelChecker::take_step(ModelAction *curr)
{
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	/* May replace curr (e.g., with an existing, equivalent action) */
	curr = check_current_action(curr);

	/* Infeasible -> don't take any more steps */
	if (is_infeasible())
		return NULL;
	else if (isfeasibleprefix() && have_bug_reports()) {
		/* Bug found on a feasible prefix: end this execution */
		set_assert();
		return NULL;
	}

	/* Optional user-specified bound on execution length (0 = no bound) */
	if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
		return NULL;

	/* Threads that can no longer run leave the scheduler */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	Thread *next_thrd = get_next_thread(curr);

	DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
			next_thrd ? id_to_int(next_thrd->get_id()) : -1);

	return next_thrd;
}
2901
/** Wrapper to run the user's main function, with appropriate arguments */
void user_main_wrapper(void *)
{
	/* Forward the command-line arguments stashed in the global model's
	 * params to the user program's entry point */
	user_main(model->params.argc, model->params.argv);
}
2907
2908 /** @brief Run ModelChecker for the user program */
2909 void ModelChecker::run()
2910 {
2911         do {
2912                 thrd_t user_thread;
2913                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2914                 add_thread(t);
2915
2916                 do {
2917                         /*
2918                          * Stash next pending action(s) for thread(s). There
2919                          * should only need to stash one thread's action--the
2920                          * thread which just took a step--plus the first step
2921                          * for any newly-created thread
2922                          */
2923                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2924                                 thread_id_t tid = int_to_id(i);
2925                                 Thread *thr = get_thread(tid);
2926                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2927                                         switch_from_master(thr);
2928                                 }
2929                         }
2930
2931                         /* Catch assertions from prior take_step or from
2932                          * between-ModelAction bugs (e.g., data races) */
2933                         if (has_asserted())
2934                                 break;
2935
2936                         /* Consume the next action for a Thread */
2937                         ModelAction *curr = t->get_pending();
2938                         t->set_pending(NULL);
2939                         t = take_step(curr);
2940                 } while (t && !t->is_model_thread());
2941
2942                 /*
2943                  * Launch end-of-execution release sequence fixups only when
2944                  * the execution is otherwise feasible AND there are:
2945                  *
2946                  * (1) pending release sequences
2947                  * (2) pending assertions that could be invalidated by a change
2948                  * in clock vectors (i.e., data races)
2949                  * (3) no pending promises
2950                  */
2951                 while (!pending_rel_seqs->empty() &&
2952                                 is_feasible_prefix_ignore_relseq() &&
2953                                 !unrealizedraces.empty()) {
2954                         model_print("*** WARNING: release sequence fixup action "
2955                                         "(%zu pending release seuqence(s)) ***\n",
2956                                         pending_rel_seqs->size());
2957                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2958                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2959                                         model_thread);
2960                         take_step(fixup);
2961                 };
2962         } while (next_execution());
2963
2964         model_print("******* Model-checking complete: *******\n");
2965         print_stats();
2966 }