model: refactor check_current_action, next thread computation
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
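/*
 * Example (illustrative): recording and printing a bug message. The format
 * string above prepends "  [BUG] " and appends a newline, so the hypothetical
 * snippet below would print "  [BUG] data race detected".
 *
 *   bug_message *b = new bug_message("data race detected");
 *   b->print();
 *   delete b;
 */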
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 current_action(NULL),
43                 /* First thread created will have id INITIAL_THREAD_ID */
44                 next_thread_id(INITIAL_THREAD_ID),
45                 used_sequence_numbers(0),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         ModelAction *current_action;
62         unsigned int next_thread_id;
63         modelclock_t used_sequence_numbers;
64         ModelAction *next_backtrack;
65         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66         struct execution_stats stats;
67         bool failed_promise;
68         bool too_many_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
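/*
 * Example (illustrative): both helpers implement the same lazily-initialized
 * per-location lookup used throughout this file, e.g.:
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);   // the returned list is never NULL
 *
 * The first access for a given location allocates an empty container and
 * caches it in the hash table; later accesses return the same object.
 */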
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         snapshotObject->backTrackBeforeStep(0);
163 }
164
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
167 {
168         return priv->next_thread_id++;
169 }
170
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
173 {
174         return priv->next_thread_id;
175 }
176
177 /** @return The currently executing Thread. */
178 Thread * ModelChecker::get_current_thread() const
179 {
180         return scheduler->get_current_thread();
181 }
182
183 /** @return a sequence number for a new ModelAction */
184 modelclock_t ModelChecker::get_next_seq_num()
185 {
186         return ++priv->used_sequence_numbers;
187 }
188
189 Node * ModelChecker::get_curr_node() const
190 {
191         return node_stack->get_head();
192 }
193
194 /**
195  * @brief Choose the next thread to execute.
196  *
197  * This function chooses the next thread that should execute. It can force the
198  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
199  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
200  * The model-checker may have no preference regarding the next thread (i.e.,
201  * when exploring a new execution ordering), in which case this will return
202  * NULL.
203  * @param curr The current ModelAction. This action might guide the choice of
204  * next thread.
205  * @return The next thread to run. If the model-checker has no preference, NULL.
206  */
207 Thread * ModelChecker::get_next_thread(ModelAction *curr)
208 {
209         thread_id_t tid;
210
211         if (curr != NULL) {
212                 /* Do not split atomic actions. */
213                 if (curr->is_rmwr())
214                         return thread_current();
215                 /* The THREAD_CREATE action points to the created Thread */
216                 else if (curr->get_type() == THREAD_CREATE)
217                         return (Thread *)curr->get_location();
218         }
219
220         /* Have we completed exploring the preselected path? */
221         if (diverge == NULL)
222                 return NULL;
223
224         /* Else, we are trying to replay an execution */
225         ModelAction *next = node_stack->get_next()->get_action();
226
227         if (next == diverge) {
228                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
229                         earliest_diverge = diverge;
230
231                 Node *nextnode = next->get_node();
232                 Node *prevnode = nextnode->get_parent();
233                 scheduler->update_sleep_set(prevnode);
234
235                 /* Reached divergence point */
236                 if (nextnode->increment_misc()) {
237                         /* The next node will try to satisfy a different misc_index value. */
238                         tid = next->get_tid();
239                         node_stack->pop_restofstack(2);
240                 } else if (nextnode->increment_promise()) {
241                         /* The next node will try to satisfy a different set of promises. */
242                         tid = next->get_tid();
243                         node_stack->pop_restofstack(2);
244                 } else if (nextnode->increment_read_from()) {
245                         /* The next node will read from a different value. */
246                         tid = next->get_tid();
247                         node_stack->pop_restofstack(2);
248                 } else if (nextnode->increment_future_value()) {
249                         /* The next node will try to read from a different future value. */
250                         tid = next->get_tid();
251                         node_stack->pop_restofstack(2);
252                 } else if (nextnode->increment_relseq_break()) {
253                         /* The next node will try to resolve a release sequence differently */
254                         tid = next->get_tid();
255                         node_stack->pop_restofstack(2);
256                 } else {
257                         ASSERT(prevnode);
258                         /* Make a different thread execute for next step */
259                         scheduler->add_sleep(get_thread(next->get_tid()));
260                         tid = prevnode->get_next_backtrack();
261                         /* Make sure the backtracked thread isn't sleeping. */
262                         node_stack->pop_restofstack(1);
263                         if (diverge == earliest_diverge) {
264                                 earliest_diverge = prevnode->get_action();
265                         }
266                 }
267                 /* The correct sleep set is in the parent node. */
268                 execute_sleep_set();
269
270                 DEBUG("*** Divergence point ***\n");
271
272                 diverge = NULL;
273         } else {
274                 tid = next->get_tid();
275         }
276         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
277         ASSERT(tid != THREAD_ID_T_NONE);
278         return thread_map->get(id_to_int(tid));
279 }
280
281 /**
282  * We need to know what the next actions of all threads in the sleep
283  * set will be.  This method computes them and stores each action as the
284  * corresponding thread's pending action.
285  */
286
287 void ModelChecker::execute_sleep_set()
288 {
289         for (unsigned int i = 0; i < get_num_threads(); i++) {
290                 thread_id_t tid = int_to_id(i);
291                 Thread *thr = get_thread(tid);
292                 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
293                         thr->set_state(THREAD_RUNNING);
294                         scheduler->next_thread(thr);
295                         Thread::swap(&system_context, thr);
296                         priv->current_action->set_sleep_flag();
297                         thr->set_pending(priv->current_action);
298                 }
299         }
300 }
301
302 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
303 {
304         for (unsigned int i = 0; i < get_num_threads(); i++) {
305                 Thread *thr = get_thread(int_to_id(i));
306                 if (scheduler->is_sleep_set(thr)) {
307                         ModelAction *pending_act = thr->get_pending();
308                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
309                                 //Remove this thread from sleep set
310                                 scheduler->remove_sleep(thr);
311                 }
312         }
313 }
314
315 /** @brief Alert the model-checker that an incorrectly-ordered
316  * synchronization was made */
317 void ModelChecker::set_bad_synchronization()
318 {
319         priv->bad_synchronization = true;
320 }
321
322 bool ModelChecker::has_asserted() const
323 {
324         return priv->asserted;
325 }
326
327 void ModelChecker::set_assert()
328 {
329         priv->asserted = true;
330 }
331
332 /**
333  * Check if we are in a deadlock. Should only be called at the end of an
334  * execution, although it should not give false positives in the middle of an
335  * execution (there should be some ENABLED thread).
336  *
337  * @return True if program is in a deadlock; false otherwise
338  */
339 bool ModelChecker::is_deadlocked() const
340 {
341         bool blocking_threads = false;
342         for (unsigned int i = 0; i < get_num_threads(); i++) {
343                 thread_id_t tid = int_to_id(i);
344                 if (is_enabled(tid))
345                         return false;
346                 Thread *t = get_thread(tid);
347                 if (!t->is_model_thread() && t->get_pending())
348                         blocking_threads = true;
349         }
350         return blocking_threads;
351 }
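/*
 * Example (illustrative): a user program this check is meant to flag. If two
 * threads acquire the same two mutexes in opposite order, some interleaving
 * leaves neither thread enabled while both still have pending lock actions:
 *
 *   std::mutex a, b;
 *   // Thread 1:               // Thread 2:
 *   a.lock();                  b.lock();
 *   b.lock();  // blocks       a.lock();  // blocks
 *
 * At that point no thread is enabled, at least one non-model thread has a
 * pending action, and is_deadlocked() returns true.
 */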
352
353 /**
354  * Check if this is a complete execution. That is, have all threads completed
355  * execution (rather than exiting because sleep sets have forced a redundant
356  * execution).
357  *
358  * @return True if the execution is complete.
359  */
360 bool ModelChecker::is_complete_execution() const
361 {
362         for (unsigned int i = 0; i < get_num_threads(); i++)
363                 if (is_enabled(int_to_id(i)))
364                         return false;
365         return true;
366 }
367
368 /**
369  * @brief Assert a bug in the executing program.
370  *
371  * Use this function to assert any sort of bug in the user program. If the
372  * current trace is feasible (actually, a prefix of some feasible execution),
373  * then this execution will be aborted, printing the appropriate message. If
374  * the current trace is not yet feasible, the error message will be stashed and
375  * printed if the execution ever becomes feasible.
376  *
377  * @param msg Descriptive message for the bug (do not include newline char)
378  * @return True if bug is immediately-feasible
379  */
380 bool ModelChecker::assert_bug(const char *msg)
381 {
382         priv->bugs.push_back(new bug_message(msg));
383
384         if (isfeasibleprefix()) {
385                 set_assert();
386                 return true;
387         }
388         return false;
389 }
390
391 /**
392  * @brief Assert a bug in the executing program, asserted by a user thread
393  * @see ModelChecker::assert_bug
394  * @param msg Descriptive message for the bug (do not include newline char)
395  */
396 void ModelChecker::assert_user_bug(const char *msg)
397 {
398         /* If feasible bug, bail out now */
399         if (assert_bug(msg))
400                 switch_to_master(NULL);
401 }
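/*
 * Example (hypothetical call site): user-level or library code can report a
 * detected misuse through the global model pointer; if the current trace is
 * already a feasible prefix, the execution aborts immediately:
 *
 *   if (accessed_before_init)   // hypothetical condition
 *       model->assert_user_bug("lock accessed before initialization");
 */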
402
403 /** @return True, if any bugs have been reported for this execution */
404 bool ModelChecker::have_bug_reports() const
405 {
406         return priv->bugs.size() != 0;
407 }
408
409 /** @brief Print bug report listing for this execution (if any bugs exist) */
410 void ModelChecker::print_bugs() const
411 {
412         if (have_bug_reports()) {
413                 model_print("Bug report: %zu bug%s detected\n",
414                                 priv->bugs.size(),
415                                 priv->bugs.size() > 1 ? "s" : "");
416                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
417                         priv->bugs[i]->print();
418         }
419 }
420
421 /**
422  * @brief Record end-of-execution stats
423  *
424  * Must be run when exiting an execution. Records various stats.
425  * @see struct execution_stats
426  */
427 void ModelChecker::record_stats()
428 {
429         stats.num_total++;
430         if (!isfeasibleprefix())
431                 stats.num_infeasible++;
432         else if (have_bug_reports())
433                 stats.num_buggy_executions++;
434         else if (is_complete_execution())
435                 stats.num_complete++;
436         else
437                 stats.num_redundant++;
438 }
439
440 /** @brief Print execution stats */
441 void ModelChecker::print_stats() const
442 {
443         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
444         model_print("Number of redundant executions: %d\n", stats.num_redundant);
445         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
446         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
447         model_print("Total executions: %d\n", stats.num_total);
448         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
449 }
450
451 /**
452  * @brief End-of-execution print
453  * @param printbugs Should any existing bugs be printed?
454  */
455 void ModelChecker::print_execution(bool printbugs) const
456 {
457         print_program_output();
458
459         if (DBG_ENABLED() || params.verbose) {
460                 model_print("Earliest divergence point since last feasible execution:\n");
461                 if (earliest_diverge)
462                         earliest_diverge->print();
463                 else
464                         model_print("(Not set)\n");
465
466                 model_print("\n");
467                 print_stats();
468         }
469
470         /* Don't print invalid bugs */
471         if (printbugs)
472                 print_bugs();
473
474         model_print("\n");
475         print_summary();
476 }
477
478 /**
479  * Queries the model-checker for more executions to explore and, if one
480  * exists, resets the model-checker state to execute a new execution.
481  *
482  * @return If there are more executions to explore, return true. Otherwise,
483  * return false.
484  */
485 bool ModelChecker::next_execution()
486 {
487         DBG();
488         /* Is this execution a feasible execution that's worth bug-checking? */
489         bool complete = isfeasibleprefix() && (is_complete_execution() ||
490                         have_bug_reports());
491
492         /* End-of-execution bug checks */
493         if (complete) {
494                 if (is_deadlocked())
495                         assert_bug("Deadlock detected");
496
497                 checkDataRaces();
498         }
499
500         record_stats();
501
502         /* Output */
503         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
504                 print_execution(complete);
505         else
506                 clear_program_output();
507
508         if (complete)
509                 earliest_diverge = NULL;
510
511         if ((diverge = get_next_backtrack()) == NULL)
512                 return false;
513
514         if (DBG_ENABLED()) {
515                 model_print("Next execution will diverge at:\n");
516                 diverge->print();
517         }
518
519         reset_to_initial_state();
520         return true;
521 }
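/*
 * Sketch (hypothetical driver loop, assuming a run_one_execution() helper that
 * schedules user threads until the current execution finishes): the checker is
 * expected to call next_execution() repeatedly until the search space is
 * exhausted:
 *
 *   do {
 *           run_one_execution();   // assumed helper, not defined in this file
 *   } while (model->next_execution());
 */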
522
523 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 {
525         switch (act->get_type()) {
526         case ATOMIC_FENCE:
527         case ATOMIC_READ:
528         case ATOMIC_WRITE:
529         case ATOMIC_RMW: {
530                 /* Optimization: relaxed operations don't need backtracking */
531                 if (act->is_relaxed())
532                         return NULL;
533                 /* linear search: from most recent to oldest */
534                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
535                 action_list_t::reverse_iterator rit;
536                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
537                         ModelAction *prev = *rit;
538                         if (prev->could_synchronize_with(act))
539                                 return prev;
540                 }
541                 break;
542         }
543         case ATOMIC_LOCK:
544         case ATOMIC_TRYLOCK: {
545                 /* linear search: from most recent to oldest */
546                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
547                 action_list_t::reverse_iterator rit;
548                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
549                         ModelAction *prev = *rit;
550                         if (act->is_conflicting_lock(prev))
551                                 return prev;
552                 }
553                 break;
554         }
555         case ATOMIC_UNLOCK: {
556                 /* linear search: from most recent to oldest */
557                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
558                 action_list_t::reverse_iterator rit;
559                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
560                         ModelAction *prev = *rit;
561                         if (!act->same_thread(prev) && prev->is_failed_trylock())
562                                 return prev;
563                 }
564                 break;
565         }
566         case ATOMIC_WAIT: {
567                 /* linear search: from most recent to oldest */
568                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
569                 action_list_t::reverse_iterator rit;
570                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
571                         ModelAction *prev = *rit;
572                         if (!act->same_thread(prev) && prev->is_failed_trylock())
573                                 return prev;
574                         if (!act->same_thread(prev) && prev->is_notify())
575                                 return prev;
576                 }
577                 break;
578         }
579
580         case ATOMIC_NOTIFY_ALL:
581         case ATOMIC_NOTIFY_ONE: {
582                 /* linear search: from most recent to oldest */
583                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
584                 action_list_t::reverse_iterator rit;
585                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
586                         ModelAction *prev = *rit;
587                         if (!act->same_thread(prev) && prev->is_wait())
588                                 return prev;
589                 }
590                 break;
591         }
592         default:
593                 break;
594         }
595         return NULL;
596 }
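/*
 * Example (illustrative): for a release store to an atomic location x, an
 * earlier acquire load of x performed by another thread is a conflicting
 * action, since the pair could synchronize in a different ordering:
 *
 *   // Thread 1:                               // Thread 2:
 *   x.store(1, std::memory_order_release);     x.load(std::memory_order_acquire);
 *
 * get_last_conflict() for the store would return the load, and
 * set_backtracking() below can then mark the node just before the load so that
 * a future execution runs the storing thread at that point instead.
 */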
597
598 /** This method finds backtracking points against which we should try to
599  * reorder the given ModelAction.
600  *
601  * @param act The ModelAction to find backtracking points for.
602  */
603 void ModelChecker::set_backtracking(ModelAction *act)
604 {
605         Thread *t = get_thread(act);
606         ModelAction *prev = get_last_conflict(act);
607         if (prev == NULL)
608                 return;
609
610         Node *node = prev->get_node()->get_parent();
611
612         int low_tid, high_tid;
613         if (node->is_enabled(t)) {
614                 low_tid = id_to_int(act->get_tid());
615                 high_tid = low_tid + 1;
616         } else {
617                 low_tid = 0;
618                 high_tid = get_num_threads();
619         }
620
621         for (int i = low_tid; i < high_tid; i++) {
622                 thread_id_t tid = int_to_id(i);
623
624                 /* Make sure this thread can be enabled here. */
625                 if (i >= node->get_num_threads())
626                         break;
627
628                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
629                 if (node->enabled_status(tid) != THREAD_ENABLED)
630                         continue;
631
632                 /* Check if this has been explored already */
633                 if (node->has_been_explored(tid))
634                         continue;
635
636                 /* See if fairness allows */
637                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
638                         bool unfair = false;
639                         for (int t = 0; t < node->get_num_threads(); t++) {
640                                 thread_id_t tother = int_to_id(t);
641                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
642                                         unfair = true;
643                                         break;
644                                 }
645                         }
646                         if (unfair)
647                                 continue;
648                 }
649                 /* Cache the latest backtracking point */
650                 set_latest_backtrack(prev);
651
652                 /* If this is a new backtracking point, mark the tree */
653                 if (!node->set_backtrack(tid))
654                         continue;
655                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
656                                         id_to_int(prev->get_tid()),
657                                         id_to_int(t->get_id()));
658                 if (DBG_ENABLED()) {
659                         prev->print();
660                         act->print();
661                 }
662         }
663 }
664
665 /**
666  * @brief Cache a backtracking point as the "most recent", if eligible
667  *
668  * Note that this does not prepare the NodeStack for this backtracking
669  * operation; it only caches the action on a per-execution basis
670  *
671  * @param act The operation at which we should explore a different next action
672  * (i.e., backtracking point)
673  * @return True, if this action is now the most recent backtracking point;
674  * false otherwise
675  */
676 bool ModelChecker::set_latest_backtrack(ModelAction *act)
677 {
678         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
679                 priv->next_backtrack = act;
680                 return true;
681         }
682         return false;
683 }
684
685 /**
686  * Returns last backtracking point. The model checker will explore a different
687  * path for this point in the next execution.
688  * @return The ModelAction at which the next execution should diverge.
689  */
690 ModelAction * ModelChecker::get_next_backtrack()
691 {
692         ModelAction *next = priv->next_backtrack;
693         priv->next_backtrack = NULL;
694         return next;
695 }
696
697 /**
698  * Processes a read or rmw model action.
699  * @param curr The read model action to process.
700  * @param second_part_of_rmw A boolean that is true if this is the second action of an RMW.
701  * @return True if processing this read updates the mo_graph.
702  */
703 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
704 {
705         uint64_t value = VALUE_NONE;
706         bool updated = false;
707         while (true) {
708                 const ModelAction *reads_from = curr->get_node()->get_read_from();
709                 if (reads_from != NULL) {
710                         mo_graph->startChanges();
711
712                         value = reads_from->get_value();
713                         bool r_status = false;
714
715                         if (!second_part_of_rmw) {
716                                 check_recency(curr, reads_from);
717                                 r_status = r_modification_order(curr, reads_from);
718                         }
719
720
721                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
722                                 mo_graph->rollbackChanges();
723                                 priv->too_many_reads = false;
724                                 continue;
725                         }
726
727                         read_from(curr, reads_from);
728                         mo_graph->commitChanges();
729                         mo_check_promises(curr->get_tid(), reads_from);
730
731                         updated |= r_status;
732                 } else if (!second_part_of_rmw) {
733                         /* Read from future value */
734                         value = curr->get_node()->get_future_value();
735                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
736                         curr->set_read_from(NULL);
737                         Promise *valuepromise = new Promise(curr, value, expiration);
738                         promises->push_back(valuepromise);
739                 }
740                 get_thread(curr)->set_return_value(value);
741                 return updated;
742         }
743 }
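/*
 * Example (illustrative) of the future-value branch above: suppose thread 1's
 * relaxed load of x executes before thread 2's store of 42 has been performed:
 *
 *   // Thread 1:                             // Thread 2:
 *   r = x.load(std::memory_order_relaxed);   x.store(42, std::memory_order_relaxed);
 *
 * The load may still be explored with value 42 by recording a Promise; the
 * trace stays feasible only if some later write actually provides 42 before
 * the promise's expiration sequence number.
 */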
744
745 /**
746  * Processes a lock, trylock, unlock, wait, or notify model action.
747  * @param curr The mutex-related model action to process.
748  *
749  * The trylock operation checks whether the lock is taken.  If not, it
750  * falls through to the normal lock case.  If so, it fails and the
751  * thread's return value is set to 0.
752  *
753  * The lock operation has already been checked to be enabled, so
754  * it just grabs the lock and synchronizes with the previous unlock.
755  *
756  * The unlock operation has to re-enable all of the threads that are
757  * waiting on the lock.
758  *
759  * @return True if synchronization was updated; false otherwise
760  */
761 bool ModelChecker::process_mutex(ModelAction *curr)
762 {
763         std::mutex *mutex = NULL;
764         struct std::mutex_state *state = NULL;
765
766         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
767                 mutex = (std::mutex *)curr->get_location();
768                 state = mutex->get_state();
769         } else if (curr->is_wait()) {
770                 mutex = (std::mutex *)curr->get_value();
771                 state = mutex->get_state();
772         }
773
774         switch (curr->get_type()) {
775         case ATOMIC_TRYLOCK: {
776                 bool success = !state->islocked;
777                 curr->set_try_lock(success);
778                 if (!success) {
779                         get_thread(curr)->set_return_value(0);
780                         break;
781                 }
782                 get_thread(curr)->set_return_value(1);
783         }
784                 //otherwise fall through to the lock case
785         case ATOMIC_LOCK: {
786                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
787                         assert_bug("Lock access before initialization");
788                 state->islocked = true;
789                 ModelAction *unlock = get_last_unlock(curr);
790                 //synchronize with the previous unlock statement
791                 if (unlock != NULL) {
792                         curr->synchronize_with(unlock);
793                         return true;
794                 }
795                 break;
796         }
797         case ATOMIC_UNLOCK: {
798                 //unlock the lock
799                 state->islocked = false;
800                 //wake up the other threads
801                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
802                 //activate all the waiting threads
803                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
804                         scheduler->wake(get_thread(*rit));
805                 }
806                 waiters->clear();
807                 break;
808         }
809         case ATOMIC_WAIT: {
810                 //unlock the lock
811                 state->islocked = false;
812                 //wake up the other threads
813                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
814                 //activate all the waiting threads
815                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
816                         scheduler->wake(get_thread(*rit));
817                 }
818                 waiters->clear();
819                 //check whether we should go to sleep or not...simulate spurious failures
820                 if (curr->get_node()->get_misc() == 0) {
821                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
822                         //disable us
823                         scheduler->sleep(get_current_thread());
824                 }
825                 break;
826         }
827         case ATOMIC_NOTIFY_ALL: {
828                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
829                 //activate all the waiting threads
830                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
831                         scheduler->wake(get_thread(*rit));
832                 }
833                 waiters->clear();
834                 break;
835         }
836         case ATOMIC_NOTIFY_ONE: {
837                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
838                 int wakeupthread = curr->get_node()->get_misc();
839                 action_list_t::iterator it = waiters->begin();
840                 advance(it, wakeupthread);
841                 scheduler->wake(get_thread(*it));
842                 waiters->erase(it);
843                 break;
844         }
845
846         default:
847                 ASSERT(0);
848         }
849         return false;
850 }
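/*
 * Example (illustrative): user-level code that produces the mutex actions
 * handled above. A successful trylock falls through to the lock case, so it
 * also synchronizes with the previous unlock:
 *
 *   std::mutex m;
 *   if (m.try_lock()) {        // ATOMIC_TRYLOCK; falls through to lock on success
 *           // critical section
 *           m.unlock();        // ATOMIC_UNLOCK: wakes any blocked lockers
 *   } else {
 *           m.lock();          // ATOMIC_LOCK
 *           m.unlock();
 *   }
 */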
851
852 /**
853  * Process a write ModelAction
854  * @param curr The ModelAction to process
855  * @return True if the mo_graph was updated or promises were resolved
856  */
857 bool ModelChecker::process_write(ModelAction *curr)
858 {
859         bool updated_mod_order = w_modification_order(curr);
860         bool updated_promises = resolve_promises(curr);
861
862         if (promises->size() == 0) {
863                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
864                         struct PendingFutureValue pfv = (*futurevalues)[i];
865                         //Do more ambitious checks now that mo is more complete
866                         if (mo_may_allow(pfv.writer, pfv.act) &&
867                                         pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number() + params.maxfuturedelay))
868                                 set_latest_backtrack(pfv.act);
869                 }
870                 futurevalues->resize(0);
871         }
872
873         mo_graph->commitChanges();
874         mo_check_promises(curr->get_tid(), curr);
875
876         get_thread(curr)->set_return_value(VALUE_NONE);
877         return updated_mod_order || updated_promises;
878 }
879
880 /**
881  * Process a fence ModelAction
882  * @param curr The ModelAction to process
883  * @return True if synchronization was updated
884  */
885 bool ModelChecker::process_fence(ModelAction *curr)
886 {
887         /*
888          * fence-relaxed: no-op
889          * fence-release: only log the occurrence (not in this function), for
890          *   use in later synchronization
891          * fence-acquire (this function): search for hypothetical release
892          *   sequences
893          */
894         bool updated = false;
895         if (curr->is_acquire()) {
896                 action_list_t *list = action_trace;
897                 action_list_t::reverse_iterator rit;
898                 /* Find X : is_read(X) && X --sb-> curr */
899                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
900                         ModelAction *act = *rit;
901                         if (act == curr)
902                                 continue;
903                         if (act->get_tid() != curr->get_tid())
904                                 continue;
905                         /* Stop at the beginning of the thread */
906                         if (act->is_thread_start())
907                                 break;
908                         /* Stop once we reach a prior fence-acquire */
909                         if (act->is_fence() && act->is_acquire())
910                                 break;
911                         if (!act->is_read())
912                                 continue;
913                         /* read-acquire will find its own release sequences */
914                         if (act->is_acquire())
915                                 continue;
916
917                         /* Establish hypothetical release sequences */
918                         rel_heads_list_t release_heads;
919                         get_release_seq_heads(curr, act, &release_heads);
920                         for (unsigned int i = 0; i < release_heads.size(); i++)
921                                 if (!curr->synchronize_with(release_heads[i]))
922                                         set_bad_synchronization();
923                         if (release_heads.size() != 0)
924                                 updated = true;
925                 }
926         }
927         return updated;
928 }
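/*
 * Example (illustrative): the fence-acquire search above lets a relaxed load
 * followed by an acquire fence synchronize with a release store, mirroring the
 * C++11 fence rules:
 *
 *   // Thread 1:
 *   data = 1;                                   // ordinary variable
 *   flag.store(1, std::memory_order_release);
 *
 *   // Thread 2:
 *   if (flag.load(std::memory_order_relaxed)) {
 *           std::atomic_thread_fence(std::memory_order_acquire);
 *           assert(data == 1);                  // guaranteed once synchronized
 *   }
 */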
929
930 /**
931  * @brief Process the current action for thread-related activity
932  *
933  * Performs current-action processing for a THREAD_* ModelAction. Processing
934  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
935  * synchronization, etc.  This function is a no-op for non-THREAD actions
936  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
937  *
938  * @param curr The current action
939  * @return True if synchronization was updated or a thread completed
940  */
941 bool ModelChecker::process_thread_action(ModelAction *curr)
942 {
943         bool updated = false;
944
945         switch (curr->get_type()) {
946         case THREAD_CREATE: {
947                 Thread *th = (Thread *)curr->get_location();
948                 th->set_creation(curr);
949                 break;
950         }
951         case THREAD_JOIN: {
952                 Thread *blocking = (Thread *)curr->get_location();
953                 ModelAction *act = get_last_action(blocking->get_id());
954                 curr->synchronize_with(act);
955                 updated = true; /* trigger rel-seq checks */
956                 break;
957         }
958         case THREAD_FINISH: {
959                 Thread *th = get_thread(curr);
960                 while (!th->wait_list_empty()) {
961                         ModelAction *act = th->pop_wait_list();
962                         scheduler->wake(get_thread(act));
963                 }
964                 th->complete();
965                 updated = true; /* trigger rel-seq checks */
966                 break;
967         }
968         case THREAD_START: {
969                 check_promises(curr->get_tid(), NULL, curr->get_cv());
970                 break;
971         }
972         default:
973                 break;
974         }
975
976         return updated;
977 }
978
979 /**
980  * @brief Process the current action for release sequence fixup activity
981  *
982  * Performs model-checker release sequence fixups for the current action,
983  * forcing a single pending release sequence to break (with a given, potential
984  * "loose" write) or to complete (i.e., synchronize). If a pending release
985  * sequence forms a complete release sequence, then we must perform the fixup
986  * synchronization, mo_graph additions, etc.
987  *
988  * @param curr The current action; must be a release sequence fixup action
989  * @param work_queue The work queue to which to add work items as they are
990  * generated
991  */
992 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
993 {
994         const ModelAction *write = curr->get_node()->get_relseq_break();
995         struct release_seq *sequence = pending_rel_seqs->back();
996         pending_rel_seqs->pop_back();
997         ASSERT(sequence);
998         ModelAction *acquire = sequence->acquire;
999         const ModelAction *rf = sequence->rf;
1000         const ModelAction *release = sequence->release;
1001         ASSERT(acquire);
1002         ASSERT(release);
1003         ASSERT(rf);
1004         ASSERT(release->same_thread(rf));
1005
1006         if (write == NULL) {
1007                 /**
1008                  * @todo Forcing a synchronization requires that we set
1009                  * modification order constraints. For instance, we can't allow
1010                  * a fixup sequence in which two separate read-acquire
1011                  * operations read from the same sequence, where the first one
1012                  * synchronizes and the other doesn't. Essentially, we can't
1013                  * allow any writes to insert themselves between 'release' and
1014                  * 'rf'
1015                  */
1016
1017                 /* Must synchronize */
1018                 if (!acquire->synchronize_with(release)) {
1019                         set_bad_synchronization();
1020                         return;
1021                 }
1022                 /* Re-check all pending release sequences */
1023                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1024                 /* Re-check act for mo_graph edges */
1025                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1026
1027                 /* propagate synchronization to later actions */
1028                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1029                 for (; (*rit) != acquire; rit++) {
1030                         ModelAction *propagate = *rit;
1031                         if (acquire->happens_before(propagate)) {
1032                                 propagate->synchronize_with(acquire);
1033                                 /* Re-check 'propagate' for mo_graph edges */
1034                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1035                         }
1036                 }
1037         } else {
1038                 /* Break release sequence with new edges:
1039                  *   release --mo--> write --mo--> rf */
1040                 mo_graph->addEdge(release, write);
1041                 mo_graph->addEdge(write, rf);
1042         }
1043
1044         /* See if we have realized a data race */
1045         checkDataRaces();
1046 }
1047
1048 /**
1049  * Initialize the current action by performing one or more of the following
1050  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1051  * in the NodeStack, manipulating backtracking sets, allocating and
1052  * initializing clock vectors, and computing the promises to fulfill.
1053  *
1054  * @param curr The current action, as passed from the user context; may be
1055  * freed/invalidated after the execution of this function, with a different
1056  * action "returned" its place (pass-by-reference)
1057  * @return True if curr is a newly-explored action; false otherwise
1058  */
1059 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1060 {
1061         ModelAction *newcurr;
1062
1063         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1064                 newcurr = process_rmw(*curr);
1065                 delete *curr;
1066
1067                 if (newcurr->is_rmw())
1068                         compute_promises(newcurr);
1069
1070                 *curr = newcurr;
1071                 return false;
1072         }
1073
1074         (*curr)->set_seq_number(get_next_seq_num());
1075
1076         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1077         if (newcurr) {
1078                 /* First restore type and order in case of RMW operation */
1079                 if ((*curr)->is_rmwr())
1080                         newcurr->copy_typeandorder(*curr);
1081
1082                 ASSERT((*curr)->get_location() == newcurr->get_location());
1083                 newcurr->copy_from_new(*curr);
1084
1085                 /* Discard duplicate ModelAction; use action from NodeStack */
1086                 delete *curr;
1087
1088                 /* Always compute new clock vector */
1089                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1090
1091                 *curr = newcurr;
1092                 return false; /* Action was explored previously */
1093         } else {
1094                 newcurr = *curr;
1095
1096                 /* Always compute new clock vector */
1097                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1098
1099                 /* Assign most recent release fence */
1100                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1101
1102                 /*
1103                  * Perform one-time actions when pushing new ModelAction onto
1104                  * NodeStack
1105                  */
1106                 if (newcurr->is_write())
1107                         compute_promises(newcurr);
1108                 else if (newcurr->is_relseq_fixup())
1109                         compute_relseq_breakwrites(newcurr);
1110                 else if (newcurr->is_wait())
1111                         newcurr->get_node()->set_misc_max(2);
1112                 else if (newcurr->is_notify_one()) {
1113                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1114                 }
1115                 return true; /* This was a new ModelAction */
1116         }
1117 }
1118
1119 /**
1120  * @brief Establish reads-from relation between two actions
1121  *
1122  * Perform basic operations involved with establishing a concrete rf relation,
1123  * including setting the ModelAction data and checking for release sequences.
1124  *
1125  * @param act The action that is reading (must be a read)
1126  * @param rf The action from which we are reading (must be a write)
1127  *
1128  * @return True if this read established synchronization
1129  */
1130 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1131 {
1132         act->set_read_from(rf);
1133         if (rf != NULL && act->is_acquire()) {
1134                 rel_heads_list_t release_heads;
1135                 get_release_seq_heads(act, act, &release_heads);
1136                 int num_heads = release_heads.size();
1137                 for (unsigned int i = 0; i < release_heads.size(); i++)
1138                         if (!act->synchronize_with(release_heads[i])) {
1139                                 set_bad_synchronization();
1140                                 num_heads--;
1141                         }
1142                 return num_heads > 0;
1143         }
1144         return false;
1145 }
1146
1147 /**
1148  * @brief Check whether a model action is enabled.
1149  *
1150  * Checks whether a lock or join operation would be successful (i.e., the lock
1151  * is not already held, or the joined thread has already completed). If not,
1152  * the action is placed on a waiter list.
1153  *
1154  * @param curr is the ModelAction to check whether it is enabled.
1155  * @return a bool that indicates whether the action is enabled.
1156  */
1157 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1158         if (curr->is_lock()) {
1159                 std::mutex *lock = (std::mutex *)curr->get_location();
1160                 struct std::mutex_state *state = lock->get_state();
1161                 if (state->islocked) {
1162                         //Stick the action in the appropriate waiting queue
1163                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1164                         return false;
1165                 }
1166         } else if (curr->get_type() == THREAD_JOIN) {
1167                 Thread *blocking = (Thread *)curr->get_location();
1168                 if (!blocking->is_complete()) {
1169                         blocking->push_wait_list(curr);
1170                         return false;
1171                 }
1172         }
1173
1174         return true;
1175 }
1176
1177 /**
1178  * Stores the ModelAction for the current thread action.  Call this
1179  * immediately before switching from user- to system-context to pass
1180  * data between them.
1181  * @param act The ModelAction created by the user-thread action
1182  */
1183 void ModelChecker::set_current_action(ModelAction *act) {
1184         priv->current_action = act;
1185 }
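/*
 * Sketch (assumption, based only on usage visible in this file): the
 * user-thread side of the handoff presumably looks roughly like
 *
 *   model->set_current_action(act);
 *   // ...swap from the user context into system_context...
 *
 * after which the model-checker context picks the action up through
 * priv->current_action (see execute_sleep_set() above).
 */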
1186
1187 /**
1188  * This is the heart of the model checker routine. It performs model-checking
1189  * actions corresponding to a given "current action." Among other processes, it
1190  * calculates reads-from relationships, updates synchronization clock vectors,
1191  * forms a memory_order constraints graph, and handles replay/backtrack
1192  * execution when running permutations of previously-observed executions.
1193  *
1194  * @param curr The current action to process
1195  * @return The ModelAction that is actually executed; may be different than
1196  * curr; may be NULL, if the current action is not enabled to run
1197  */
1198 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1199 {
1200         ASSERT(curr);
1201         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1202
1203         if (!check_action_enabled(curr)) {
1204                 /* Make the execution look like we chose to run this action
1205                  * much later, when a lock/join can succeed */
1206                 get_current_thread()->set_pending(curr);
1207                 scheduler->sleep(get_current_thread());
1208                 return NULL;
1209         }
1210
1211         bool newly_explored = initialize_curr_action(&curr);
1212
1213         DBG();
1214         if (DBG_ENABLED())
1215                 curr->print();
1216
1217         wake_up_sleeping_actions(curr);
1218
1219         /* Add the action to lists before any other model-checking tasks */
1220         if (!second_part_of_rmw)
1221                 add_action_to_lists(curr);
1222
1223         /* Build may_read_from set for newly-created actions */
1224         if (newly_explored && curr->is_read())
1225                 build_reads_from_past(curr);
1226
1227         /* Initialize work_queue with the "current action" work */
1228         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1229         while (!work_queue.empty() && !has_asserted()) {
1230                 WorkQueueEntry work = work_queue.front();
1231                 work_queue.pop_front();
1232
1233                 switch (work.type) {
1234                 case WORK_CHECK_CURR_ACTION: {
1235                         ModelAction *act = work.action;
1236                         bool update = false; /* update this location's release seq's */
1237                         bool update_all = false; /* update all release seq's */
1238
1239                         if (process_thread_action(curr))
1240                                 update_all = true;
1241
1242                         if (act->is_read() && process_read(act, second_part_of_rmw))
1243                                 update = true;
1244
1245                         if (act->is_write() && process_write(act))
1246                                 update = true;
1247
1248                         if (act->is_fence() && process_fence(act))
1249                                 update_all = true;
1250
1251                         if (act->is_mutex_op() && process_mutex(act))
1252                                 update_all = true;
1253
1254                         if (act->is_relseq_fixup())
1255                                 process_relseq_fixup(curr, &work_queue);
1256
1257                         if (update_all)
1258                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1259                         else if (update)
1260                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1261                         break;
1262                 }
1263                 case WORK_CHECK_RELEASE_SEQ:
1264                         resolve_release_sequences(work.location, &work_queue);
1265                         break;
1266                 case WORK_CHECK_MO_EDGES: {
1267                         /** @todo Complete verification of work_queue */
1268                         ModelAction *act = work.action;
1269                         bool updated = false;
1270
1271                         if (act->is_read()) {
1272                                 const ModelAction *rf = act->get_reads_from();
1273                                 if (rf != NULL && r_modification_order(act, rf))
1274                                         updated = true;
1275                         }
1276                         if (act->is_write()) {
1277                                 if (w_modification_order(act))
1278                                         updated = true;
1279                         }
1280                         mo_graph->commitChanges();
1281
1282                         if (updated)
1283                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1284                         break;
1285                 }
1286                 default:
1287                         ASSERT(false);
1288                         break;
1289                 }
1290         }
1291
1292         check_curr_backtracking(curr);
1293         set_backtracking(curr);
1294         return curr;
1295 }
1296
1297 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1298 {
1299         Node *currnode = curr->get_node();
1300         Node *parnode = currnode->get_parent();
1301
1302         if ((parnode && !parnode->backtrack_empty()) ||
1303                          !currnode->misc_empty() ||
1304                          !currnode->read_from_empty() ||
1305                          !currnode->future_value_empty() ||
1306                          !currnode->promise_empty() ||
1307                          !currnode->relseq_break_empty()) {
1308                 set_latest_backtrack(curr);
1309         }
1310 }
1311
1312 bool ModelChecker::promises_expired() const
1313 {
1314         for (unsigned int i = 0; i < promises->size(); i++) {
1315                 Promise *promise = (*promises)[i];
1316                 if (promise->get_expiration() < priv->used_sequence_numbers)
1317                         return true;
1318         }
1319         return false;
1320 }
1321
1322 /**
1323  * This is the strongest feasibility check available.
1324  * @return whether the current trace (partial or complete) must be a prefix of
1325  * a feasible trace.
1326  */
1327 bool ModelChecker::isfeasibleprefix() const
1328 {
1329         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1330 }
1331
1332 /**
1333  * Returns whether the current completed trace is feasible, except for pending
1334  * release sequences.
1335  */
1336 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1337 {
1338         if (DBG_ENABLED() && promises->size() != 0)
1339                 DEBUG("Infeasible: unresolved promises\n");
1340
1341         return !is_infeasible() && promises->size() == 0;
1342 }
1343
1344 /**
1345  * Check if the current partial trace is infeasible. Does not check any
1346  * end-of-execution flags, which might rule out the execution. Thus, this is
1347  * useful only for ruling an execution as infeasible.
1348  * @return whether the current partial trace is infeasible.
1349  */
1350 bool ModelChecker::is_infeasible() const
1351 {
1352         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1353                 DEBUG("Infeasible: RMW violation\n");
1354
1355         return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1356 }
1357
1358 /**
1359  * Check if the current partial trace is infeasible, while ignoring
1360  * infeasibility related to 2 RMW's reading from the same store. It does not
1361  * check end-of-execution feasibility.
1362  * @see ModelChecker::is_infeasible
1363  * @return whether the current partial trace is infeasible, ignoring multiple
1364  * RMWs reading from the same store.
1365  */
1366 bool ModelChecker::is_infeasible_ignoreRMW() const
1367 {
1368         if (DBG_ENABLED()) {
1369                 if (mo_graph->checkForCycles())
1370                         DEBUG("Infeasible: modification order cycles\n");
1371                 if (priv->failed_promise)
1372                         DEBUG("Infeasible: failed promise\n");
1373                 if (priv->too_many_reads)
1374                         DEBUG("Infeasible: too many reads\n");
1375                 if (priv->bad_synchronization)
1376                         DEBUG("Infeasible: bad synchronization ordering\n");
1377                 if (promises_expired())
1378                         DEBUG("Infeasible: promises expired\n");
1379         }
1380         return mo_graph->checkForCycles() || priv->failed_promise ||
1381                 priv->too_many_reads || priv->bad_synchronization ||
1382                 promises_expired();
1383 }
1384
1385 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1386 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1387         ModelAction *lastread = get_last_action(act->get_tid());
1388         lastread->process_rmw(act);
1389         if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1390                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1391                 mo_graph->commitChanges();
1392         }
1393         return lastread;
1394 }
1395
1396 /**
1397  * Checks whether a thread has read from the same write too many times
1398  * without seeing the effects of a later write.
1399  *
1400  * Basic idea:
1401  * 1) there must be a different write that we could read from that would satisfy the modification order,
1402  * 2) we must have read from the same write at least maxreads times, and
1403  * 3) that other write must have been in the reads_from set for each of those maxreads reads.
1404  *
1405  * If so, we decide that the execution is no longer feasible.
1406  */
1407 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1408 {
1409         if (params.maxreads != 0) {
1410                 if (curr->get_node()->get_read_from_size() <= 1)
1411                         return;
1412                 //Must make sure that execution is currently feasible...  We could
1413                 //accidentally clear by rolling back
1414                 if (is_infeasible())
1415                         return;
1416                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1417                 int tid = id_to_int(curr->get_tid());
1418
1419                 /* Skip checks */
1420                 if ((int)thrd_lists->size() <= tid)
1421                         return;
1422                 action_list_t *list = &(*thrd_lists)[tid];
1423
1424                 action_list_t::reverse_iterator rit = list->rbegin();
1425                 /* Skip past curr */
1426                 for (; (*rit) != curr; rit++)
1427                         ;
1428                 /* go past curr now */
1429                 rit++;
1430
1431                 action_list_t::reverse_iterator ritcopy = rit;
1432                 //See if we have enough reads from the same value
1433                 int count = 0;
1434                 for (; count < params.maxreads; rit++, count++) {
1435                         if (rit == list->rend())
1436                                 return;
1437                         ModelAction *act = *rit;
1438                         if (!act->is_read())
1439                                 return;
1440
1441                         if (act->get_reads_from() != rf)
1442                                 return;
1443                         if (act->get_node()->get_read_from_size() <= 1)
1444                                 return;
1445                 }
1446                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1447                         /* Get write */
1448                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1449
1450                         /* Need a different write */
1451                         if (write == rf)
1452                                 continue;
1453
1454                         /* Test to see whether this is a feasible write to read from */
1455                         mo_graph->startChanges();
1456                         r_modification_order(curr, write);
1457                         bool feasiblereadfrom = !is_infeasible();
1458                         mo_graph->rollbackChanges();
1459
1460                         if (!feasiblereadfrom)
1461                                 continue;
1462                         rit = ritcopy;
1463
1464                         bool feasiblewrite = true;
1465                         //now we need to see if this write works for everyone
1466
1467                         for (int loop = count; loop > 0; loop--, rit++) {
1468                                 ModelAction *act = *rit;
1469                                 bool foundvalue = false;
1470                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1471                                         if (act->get_node()->get_read_from_at(j) == write) {
1472                                                 foundvalue = true;
1473                                                 break;
1474                                         }
1475                                 }
1476                                 if (!foundvalue) {
1477                                         feasiblewrite = false;
1478                                         break;
1479                                 }
1480                         }
1481                         if (feasiblewrite) {
1482                                 priv->too_many_reads = true;
1483                                 return;
1484                         }
1485                 }
1486         }
1487 }
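/*
 * Illustrative sketch (hypothetical standalone test, not part of this file):
 * the maxreads bound prunes executions in which a reader keeps re-reading the
 * same stale store even though a newer, feasible store is available, e.g. an
 * otherwise unbounded spin loop like the one below.
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> flag(0);
 *
 *   static void writer()
 *   {
 *           flag.store(1, std::memory_order_release);
 *   }
 *
 *   static void reader()
 *   {
 *           // Each relaxed load may legally read the initial 0; once the
 *           // store of 1 is a feasible alternative, re-reading 0 more than
 *           // maxreads times sets too_many_reads and the trace is abandoned.
 *           while (flag.load(std::memory_order_relaxed) == 0)
 *                   ;
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(writer), t2(reader);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */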
1488
1489 /**
1490  * Updates the mo_graph with the constraints imposed from the current
1491  * read.
1492  *
1493  * Basic idea is the following: Go through each other thread and find
1494  * the latest action that happened before our read.  Two cases:
1495  *
1496  * (1) The action is a write => that write must either occur before
1497  * the write we read from or be the write we read from.
1498  *
1499  * (2) The action is a read => the write that that action read from
1500  * must occur before the write we read from or be the same write.
1501  *
1502  * @param curr The current action. Must be a read.
1503  * @param rf The action that curr reads from. Must be a write.
1504  * @return True if modification order edges were added; false otherwise
1505  */
1506 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1507 {
1508         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1509         unsigned int i;
1510         bool added = false;
1511         ASSERT(curr->is_read());
1512
1513         /* Last SC fence in the current thread */
1514         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1515
1516         /* Iterate over all threads */
1517         for (i = 0; i < thrd_lists->size(); i++) {
1518                 /* Last SC fence in thread i */
1519                 ModelAction *last_sc_fence_thread_local = NULL;
1520                 if (int_to_id((int)i) != curr->get_tid())
1521                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1522
1523                 /* Last SC fence in thread i, before last SC fence in current thread */
1524                 ModelAction *last_sc_fence_thread_before = NULL;
1525                 if (last_sc_fence_local)
1526                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1527
1528                 /* Iterate over actions in thread, starting from most recent */
1529                 action_list_t *list = &(*thrd_lists)[i];
1530                 action_list_t::reverse_iterator rit;
1531                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1532                         ModelAction *act = *rit;
1533
1534                         if (act->is_write() && act != rf && act != curr) {
1535                                 /* C++, Section 29.3 statement 5 */
1536                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1537                                                 *act < *last_sc_fence_thread_local) {
1538                                         mo_graph->addEdge(act, rf);
1539                                         added = true;
1540                                         break;
1541                                 }
1542                                 /* C++, Section 29.3 statement 4 */
1543                                 else if (act->is_seqcst() && last_sc_fence_local &&
1544                                                 *act < *last_sc_fence_local) {
1545                                         mo_graph->addEdge(act, rf);
1546                                         added = true;
1547                                         break;
1548                                 }
1549                                 /* C++, Section 29.3 statement 6 */
1550                                 else if (last_sc_fence_thread_before &&
1551                                                 *act < *last_sc_fence_thread_before) {
1552                                         mo_graph->addEdge(act, rf);
1553                                         added = true;
1554                                         break;
1555                                 }
1556                         }
1557
1558                         /*
1559                          * Include at most one act per-thread that "happens
1560                          * before" curr; don't include curr itself.
1561                          */
1562                         if (act->happens_before(curr) && act != curr) {
1563                                 if (act->is_write()) {
1564                                         if (rf != act) {
1565                                                 mo_graph->addEdge(act, rf);
1566                                                 added = true;
1567                                         }
1568                                 } else {
1569                                         const ModelAction *prevreadfrom = act->get_reads_from();
1570                                         //if the previous read is unresolved, keep going...
1571                                         if (prevreadfrom == NULL)
1572                                                 continue;
1573
1574                                         if (rf != prevreadfrom) {
1575                                                 mo_graph->addEdge(prevreadfrom, rf);
1576                                                 added = true;
1577                                         }
1578                                 }
1579                                 break;
1580                         }
1581                 }
1582         }
1583
1584         return added;
1585 }
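/*
 * Illustrative sketch (hypothetical litmus test, not part of this file) of the
 * two cases above:
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   void thread1()
 *   {
 *           x.store(1, std::memory_order_relaxed);
 *           y.store(1, std::memory_order_release);
 *   }
 *
 *   void thread2()
 *   {
 *           while (!y.load(std::memory_order_acquire))
 *                   ;
 *           int r = x.load(std::memory_order_relaxed);   // curr
 *   }
 *
 * The store x=1 happens before thread2's load of x (release/acquire on y plus
 * sequenced-before), so case (1) adds the edge (x=1) --mo--> rf for whatever
 * other write rf the load reads from. Reading the initial value of x then
 * becomes infeasible, which is roughly the usual message-passing guarantee
 * that r == 1.
 */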
1586
1587 /** This method fixes up the modification order when we resolve a
1588  *  promise.  The basic problem is that actions that occur after the
1589  *  read curr could not properly add items to the modification order
1590  *  for our read.
1591  *
1592  *  So for each thread, we find the earliest item that happens after
1593  *  the read curr.  This is the item we have to fix up with additional
1594  *  constraints.  If that action is a write, we add a MO edge between
1595  *  the Action rf and that action.  If the action is a read, we add a
1596  *  MO edge between the Action rf and whatever write that read accessed.
1597  *
1598  * @param curr is the read ModelAction that we are fixing up MO edges for.
1599  * @param rf is the write ModelAction that curr reads from.
1600  *
1601  */
1602 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1603 {
1604         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1605         unsigned int i;
1606         ASSERT(curr->is_read());
1607
1608         /* Iterate over all threads */
1609         for (i = 0; i < thrd_lists->size(); i++) {
1610                 /* Iterate over actions in thread, starting from most recent */
1611                 action_list_t *list = &(*thrd_lists)[i];
1612                 action_list_t::reverse_iterator rit;
1613                 ModelAction *lastact = NULL;
1614
1615                 /* Find the earliest action that happens after curr; include curr itself only if it is a RMW */
1616                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1617                         ModelAction *act = *rit;
1618                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1619                                 lastact = act;
1620                         } else
1621                                 break;
1622                 }
1623
1624                 /* Fix up the MO edge for this thread's earliest action after curr, if any */
1625                 if (lastact != NULL) {
1626                         if (lastact == curr) {
1627                                 //Case 1: The resolved read is a RMW, and we need to make sure
1628                                 //that the write portion of the RMW is mod ordered after rf
1629
1630                                 mo_graph->addEdge(rf, lastact);
1631                         } else if (lastact->is_read()) {
1632                                 //Case 2: The resolved read is a normal read and the next
1633                                 //operation is a read, and we need to make sure the value read
1634                                 //is mod ordered after rf
1635
1636                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1637                                 if (postreadfrom != NULL && rf != postreadfrom)
1638                                         mo_graph->addEdge(rf, postreadfrom);
1639                         } else {
1640                                 //Case 3: The resolved read is a normal read and the next
1641                                 //operation is a write, and we need to make sure that the
1642                                 //write is mod ordered after rf
1643                                 if (lastact != rf)
1644                                         mo_graph->addEdge(rf, lastact);
1645                         }
1646                         break;
1647                 }
1648         }
1649 }
1650
1651 /**
1652  * Updates the mo_graph with the constraints imposed from the current write.
1653  *
1654  * Basic idea is the following: Go through each other thread and find
1655  * the latest action that happened before our write.  Two cases:
1656  *
1657  * (1) The action is a write => that write must occur before
1658  * the current write
1659  *
1660  * (2) The action is a read => the write that that action read from
1661  * must occur before the current write.
1662  *
1663  * This method also handles two other issues:
1664  *
1665  * (I) Sequential Consistency: Making sure that if the current write is
1666  * seq_cst, that it occurs after the previous seq_cst write.
1667  *
1668  * (II) Sending the write back to non-synchronizing reads.
1669  *
1670  * @param curr The current action. Must be a write.
1671  * @return True if modification order edges were added; false otherwise
1672  */
1673 bool ModelChecker::w_modification_order(ModelAction *curr)
1674 {
1675         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1676         unsigned int i;
1677         bool added = false;
1678         ASSERT(curr->is_write());
1679
1680         if (curr->is_seqcst()) {
1681                 /* We have to at least see the last sequentially consistent write,
1682                          so we are initialized. */
1683                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1684                 if (last_seq_cst != NULL) {
1685                         mo_graph->addEdge(last_seq_cst, curr);
1686                         added = true;
1687                 }
1688         }
1689
1690         /* Last SC fence in the current thread */
1691         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1692
1693         /* Iterate over all threads */
1694         for (i = 0; i < thrd_lists->size(); i++) {
1695                 /* Last SC fence in thread i, before last SC fence in current thread */
1696                 ModelAction *last_sc_fence_thread_before = NULL;
1697                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1698                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1699
1700                 /* Iterate over actions in thread, starting from most recent */
1701                 action_list_t *list = &(*thrd_lists)[i];
1702                 action_list_t::reverse_iterator rit;
1703                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1704                         ModelAction *act = *rit;
1705                         if (act == curr) {
1706                                 /*
1707                                  * 1) If RMW and it actually read from something, then we
1708                                  * already have all relevant edges, so just skip to next
1709                                  * thread.
1710                                  *
1711                                  * 2) If RMW and it didn't read from anything, we should add
1712                                  * whatever edge we can get to speed up convergence.
1713                                  *
1714                                  * 3) If normal write, we need to look at earlier actions, so
1715                                  * continue processing list.
1716                                  */
1717                                 if (curr->is_rmw()) {
1718                                         if (curr->get_reads_from() != NULL)
1719                                                 break;
1720                                         else
1721                                                 continue;
1722                                 } else
1723                                         continue;
1724                         }
1725
1726                         /* C++, Section 29.3 statement 7 */
1727                         if (last_sc_fence_thread_before && act->is_write() &&
1728                                         *act < *last_sc_fence_thread_before) {
1729                                 mo_graph->addEdge(act, curr);
1730                                 added = true;
1731                                 break;
1732                         }
1733
1734                         /*
1735                          * Include at most one act per-thread that "happens
1736                          * before" curr
1737                          */
1738                         if (act->happens_before(curr)) {
1739                                 /*
1740                                  * Note: if act is RMW, just add edge:
1741                                  *   act --mo--> curr
1742                                  * The following edge should be handled elsewhere:
1743                                  *   readfrom(act) --mo--> act
1744                                  */
1745                                 if (act->is_write())
1746                                         mo_graph->addEdge(act, curr);
1747                                 else if (act->is_read()) {
1748                                         //if the previous read is unresolved (reads from NULL), just keep going
1749                                         if (act->get_reads_from() == NULL)
1750                                                 continue;
1751                                         mo_graph->addEdge(act->get_reads_from(), curr);
1752                                 }
1753                                 added = true;
1754                                 break;
1755                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1756                                                      !act->same_thread(curr)) {
1757                                 /* We have an action that:
1758                                    (1) did not happen before us
1759                                    (2) is a read and we are a write
1760                                    (3) cannot synchronize with us
1761                                    (4) is in a different thread
1762                                    =>
1763                                    that read could potentially read from our write.  Note that
1764                                    these checks are overly conservative at this point, we'll
1765                                    do more checks before actually removing the
1766                                    pendingfuturevalue.
1767
1768                                  */
1769                                 if (thin_air_constraint_may_allow(curr, act)) {
1770                                         if (!is_infeasible() ||
1771                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1772                                                 struct PendingFutureValue pfv = {curr, act};
1773                                                 futurevalues->push_back(pfv);
1774                                         }
1775                                 }
1776                         }
1777                 }
1778         }
1779
1780         return added;
1781 }
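/*
 * Illustrative sketch (hypothetical litmus test, not part of this file):
 *
 *   std::atomic<int> x(0), flag(0);
 *
 *   void thread1()
 *   {
 *           x.store(1, std::memory_order_relaxed);       // A
 *           flag.store(1, std::memory_order_release);
 *   }
 *
 *   void thread2()
 *   {
 *           while (!flag.load(std::memory_order_acquire))
 *                   ;
 *           int r = x.load(std::memory_order_relaxed);   // B
 *           x.store(2, std::memory_order_relaxed);       // curr
 *   }
 *
 * When curr = (x=2) is processed, A happens before curr (release/acquire on
 * flag plus sequenced-before), so case (1) adds A --mo--> curr; B is sequenced
 * before curr, so case (2) adds reads_from(B) --mo--> curr. Reads in other
 * threads that are not ordered before curr and might still read from it are
 * stashed as PendingFutureValues (issue II) rather than ordered here.
 */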
1782
1783 /** Arbitrary reads from the future are not allowed.  Section 29.3
1784  * part 9 places some constraints.  This method checks one result of that
1785  * constraint.  Others require compiler support. */
1786 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1787 {
1788         if (!writer->is_rmw())
1789                 return true;
1790
1791         if (!reader->is_rmw())
1792                 return true;
1793
1794         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1795                 if (search == reader)
1796                         return false;
1797                 if (search->get_tid() == reader->get_tid() &&
1798                                 search->happens_before(reader))
1799                         break;
1800         }
1801
1802         return true;
1803 }
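/*
 * Illustrative sketch (hypothetical, not part of this file): for two RMWs on
 * the same location,
 *
 *   std::atomic<int> x(0);
 *
 *   void thread1() { int r1 = x.fetch_add(1, std::memory_order_relaxed); }  // writer
 *   void thread2() { int r2 = x.fetch_add(1, std::memory_order_relaxed); }  // reader
 *
 * if the writer's reads-from chain already leads (possibly through further
 * RMWs) back to the reader, then letting the reader take the writer's value
 * as a future value would close a reads-from cycle between the two RMWs; this
 * method returns false in that case, so the pending future value is never
 * offered.
 */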
1804
1805 /**
1806  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1807  * some constraints. This method checks the following constraint (others
1808  * require compiler support):
1809  *
1810  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1811  */
1812 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1813 {
1814         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1815         unsigned int i;
1816         /* Iterate over all threads */
1817         for (i = 0; i < thrd_lists->size(); i++) {
1818                 const ModelAction *write_after_read = NULL;
1819
1820                 /* Iterate over actions in thread, starting from most recent */
1821                 action_list_t *list = &(*thrd_lists)[i];
1822                 action_list_t::reverse_iterator rit;
1823                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1824                         ModelAction *act = *rit;
1825
1826                         /* Don't disallow due to act == reader */
1827                         if (!reader->happens_before(act) || reader == act)
1828                                 break;
1829                         else if (act->is_write())
1830                                 write_after_read = act;
1831                         else if (act->is_read() && act->get_reads_from() != NULL)
1832                                 write_after_read = act->get_reads_from();
1833                 }
1834
1835                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1836                         return false;
1837         }
1838         return true;
1839 }
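/*
 * Illustrative sketch (hypothetical, not part of this file) of the
 * X --hb--> Y --mo--> Z rule:
 *
 *   std::atomic<int> x(0);
 *
 *   void thread1()
 *   {
 *           int r = x.load(std::memory_order_relaxed);   // X (reader)
 *           x.store(1, std::memory_order_relaxed);       // Y; X --hb--> Y (sequenced before)
 *   }
 *
 *   void thread2()
 *   {
 *           x.store(2, std::memory_order_relaxed);       // Z (writer)
 *   }
 *
 * If the modification order already places Y before Z (Z is reachable from Y
 * in mo_graph), then offering Z's value 2 to X as a future value is rejected:
 * X would read from a write that is modification-ordered after a write that X
 * itself happens before.
 */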
1840
1841 /**
1842  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1843  * The ModelAction under consideration is expected to be taking part in
1844  * release/acquire synchronization as an object of the "reads from" relation.
1845  * Note that this can only provide release sequence support for RMW chains
1846  * which do not read from the future, as those actions cannot be traced until
1847  * their "promise" is fulfilled. Similarly, we may not even establish the
1848  * presence of a release sequence with certainty, as some modification order
1849  * constraints may be decided further in the future. Thus, this function
1850  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1851  * and a boolean representing certainty.
1852  *
1853  * @param rf The action that might be part of a release sequence. Must be a
1854  * write.
1855  * @param release_heads A pass-by-reference style return parameter. After
1856  * execution of this function, release_heads will contain the heads of all the
1857  * relevant release sequences, if any exist with certainty
1858  * @param pending A pass-by-reference style return parameter which is only used
1859  * when returning false (i.e., uncertain). Returns most information regarding
1860  * an uncertain release sequence, including any write operations that might
1861  * break the sequence.
1862  * @return true, if the ModelChecker is certain that release_heads is complete;
1863  * false otherwise
1864  */
1865 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1866                 rel_heads_list_t *release_heads,
1867                 struct release_seq *pending) const
1868 {
1869         /* Only check for release sequences if there are no cycles */
1870         if (mo_graph->checkForCycles())
1871                 return false;
1872
1873         while (rf) {
1874                 ASSERT(rf->is_write());
1875
1876                 if (rf->is_release())
1877                         release_heads->push_back(rf);
1878                 else if (rf->get_last_fence_release())
1879                         release_heads->push_back(rf->get_last_fence_release());
1880                 if (!rf->is_rmw())
1881                         break; /* End of RMW chain */
1882
1883                 /** @todo Need to be smarter here...  In the linux lock
1884                  * example, this will run to the beginning of the program for
1885                  * every acquire. */
1886                 /** @todo The way to be smarter here is to keep going until 1
1887                  * thread has a release preceded by an acquire and you've seen
1888                  *       both. */
1889
1890                 /* acq_rel RMW is a sufficient stopping condition */
1891                 if (rf->is_acquire() && rf->is_release())
1892                         return true; /* complete */
1893
1894                 rf = rf->get_reads_from();
1895         }
1896         if (!rf) {
1897                 /* read from future: need to settle this later */
1898                 pending->rf = NULL;
1899                 return false; /* incomplete */
1900         }
1901
1902         if (rf->is_release())
1903                 return true; /* complete */
1904
1905         /* else relaxed write
1906          * - check for fence-release in the same thread (29.8, stmt. 3)
1907          * - check modification order for contiguous subsequence
1908          *   -> rf must be same thread as release */
1909
1910         const ModelAction *fence_release = rf->get_last_fence_release();
1911         /* Synchronize with a fence-release unconditionally; we don't need to
1912          * find any more "contiguous subsequence..." for it */
1913         if (fence_release)
1914                 release_heads->push_back(fence_release);
1915
1916         int tid = id_to_int(rf->get_tid());
1917         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1918         action_list_t *list = &(*thrd_lists)[tid];
1919         action_list_t::const_reverse_iterator rit;
1920
1921         /* Find rf in the thread list */
1922         rit = std::find(list->rbegin(), list->rend(), rf);
1923         ASSERT(rit != list->rend());
1924
1925         /* Find the last {write,fence}-release */
1926         for (; rit != list->rend(); rit++) {
1927                 if (fence_release && *(*rit) < *fence_release)
1928                         break;
1929                 if ((*rit)->is_release())
1930                         break;
1931         }
1932         if (rit == list->rend()) {
1933                 /* No write-release in this thread */
1934                 return true; /* complete */
1935         } else if (fence_release && *(*rit) < *fence_release) {
1936                 /* The fence-release is more recent (and so, "stronger") than
1937                  * the most recent write-release */
1938                 return true; /* complete */
1939         } /* else, need to establish contiguous release sequence */
1940         ModelAction *release = *rit;
1941
1942         ASSERT(rf->same_thread(release));
1943
1944         pending->writes.clear();
1945
1946         bool certain = true;
1947         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1948                 if (id_to_int(rf->get_tid()) == (int)i)
1949                         continue;
1950                 list = &(*thrd_lists)[i];
1951
1952                 /* Can we ensure no future writes from this thread may break
1953                  * the release seq? */
1954                 bool future_ordered = false;
1955
1956                 ModelAction *last = get_last_action(int_to_id(i));
1957                 Thread *th = get_thread(int_to_id(i));
1958                 if ((last && rf->happens_before(last)) ||
1959                                 !is_enabled(th) ||
1960                                 th->is_complete())
1961                         future_ordered = true;
1962
1963                 ASSERT(!th->is_model_thread() || future_ordered);
1964
1965                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1966                         const ModelAction *act = *rit;
1967                         /* Reach synchronization -> this thread is complete */
1968                         if (act->happens_before(release))
1969                                 break;
1970                         if (rf->happens_before(act)) {
1971                                 future_ordered = true;
1972                                 continue;
1973                         }
1974
1975                         /* Only non-RMW writes can break release sequences */
1976                         if (!act->is_write() || act->is_rmw())
1977                                 continue;
1978
1979                         /* Check modification order */
1980                         if (mo_graph->checkReachable(rf, act)) {
1981                                 /* rf --mo--> act */
1982                                 future_ordered = true;
1983                                 continue;
1984                         }
1985                         if (mo_graph->checkReachable(act, release))
1986                                 /* act --mo--> release */
1987                                 break;
1988                         if (mo_graph->checkReachable(release, act) &&
1989                                       mo_graph->checkReachable(act, rf)) {
1990                                 /* release --mo-> act --mo--> rf */
1991                                 return true; /* complete */
1992                         }
1993                         /* act may break release sequence */
1994                         pending->writes.push_back(act);
1995                         certain = false;
1996                 }
1997                 if (!future_ordered)
1998                         certain = false; /* This thread is uncertain */
1999         }
2000
2001         if (certain) {
2002                 release_heads->push_back(release);
2003                 pending->writes.clear();
2004         } else {
2005                 pending->release = release;
2006                 pending->rf = rf;
2007         }
2008         return certain;
2009 }
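/*
 * Illustrative sketch (hypothetical, not part of this file):
 *
 *   std::atomic<int> x(0);
 *
 *   void thread1()
 *   {
 *           x.store(1, std::memory_order_release);       // candidate release head
 *           x.store(2, std::memory_order_relaxed);       // same thread: continues the sequence
 *   }
 *
 *   void thread2()
 *   {
 *           x.fetch_add(10, std::memory_order_relaxed);  // RMW: continues the sequence
 *   }
 *
 *   void thread3()
 *   {
 *           int r = x.load(std::memory_order_acquire);   // rf = one of the writes above
 *   }
 *
 * If thread3 reads from the fetch_add, walking the RMW chain reaches x=2, and
 * the remaining question is whether x=2 sits in a contiguous release sequence
 * headed by x=1. A plain store to x from yet another thread that lands between
 * them in modification order would break the sequence; until the modification
 * order pins that down, the candidate head is reported as pending rather than
 * certain.
 */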
2010
2011 /**
2012  * An interface for getting the release sequence head(s) with which a
2013  * given ModelAction must synchronize. This function only returns a non-empty
2014  * result when it can locate a release sequence head with certainty. Otherwise,
2015  * it may mark the internal state of the ModelChecker so that it will handle
2016  * the release sequence at a later time, causing @a acquire to update its
2017  * synchronization at some later point in execution.
2018  *
2019  * @param acquire The 'acquire' action that may synchronize with a release
2020  * sequence
2021  * @param read The read action that may read from a release sequence; this may
2022  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2023  * when 'acquire' is a fence-acquire)
2024  * @param release_heads A pass-by-reference return parameter. Will be filled
2025  * with the head(s) of the release sequence(s), if they exist with certainty.
2026  * @see ModelChecker::release_seq_heads
2027  */
2028 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2029                 ModelAction *read, rel_heads_list_t *release_heads)
2030 {
2031         const ModelAction *rf = read->get_reads_from();
2032         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2033         sequence->acquire = acquire;
2034         sequence->read = read;
2035
2036         if (!release_seq_heads(rf, release_heads, sequence)) {
2037                 /* add act to 'lazy checking' list */
2038                 pending_rel_seqs->push_back(sequence);
2039         } else {
2040                 snapshot_free(sequence);
2041         }
2042 }
2043
2044 /**
2045  * Attempt to resolve all stashed operations that might synchronize with a
2046  * release sequence for a given location. This implements the "lazy" portion of
2047  * determining whether or not a release sequence was contiguous, since not all
2048  * modification order information is present at the time an action occurs.
2049  *
2050  * @param location The location/object that should be checked for release
2051  * sequence resolutions. A NULL value means to check all locations.
2052  * @param work_queue The work queue to which to add work items as they are
2053  * generated
2054  * @return True if any updates occurred (new synchronization, new mo_graph
2055  * edges)
2056  */
2057 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2058 {
2059         bool updated = false;
2060         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2061         while (it != pending_rel_seqs->end()) {
2062                 struct release_seq *pending = *it;
2063                 ModelAction *acquire = pending->acquire;
2064                 const ModelAction *read = pending->read;
2065
2066                 /* Only resolve sequences on the given location, if provided */
2067                 if (location && read->get_location() != location) {
2068                         it++;
2069                         continue;
2070                 }
2071
2072                 const ModelAction *rf = read->get_reads_from();
2073                 rel_heads_list_t release_heads;
2074                 bool complete;
2075                 complete = release_seq_heads(rf, &release_heads, pending);
2076                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2077                         if (!acquire->has_synchronized_with(release_heads[i])) {
2078                                 if (acquire->synchronize_with(release_heads[i]))
2079                                         updated = true;
2080                                 else
2081                                         set_bad_synchronization();
2082                         }
2083                 }
2084
2085                 if (updated) {
2086                         /* Re-check all pending release sequences */
2087                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2088                         /* Re-check read-acquire for mo_graph edges */
2089                         if (acquire->is_read())
2090                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2091
2092                         /* propagate synchronization to later actions */
2093                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2094                         for (; (*rit) != acquire; rit++) {
2095                                 ModelAction *propagate = *rit;
2096                                 if (acquire->happens_before(propagate)) {
2097                                         propagate->synchronize_with(acquire);
2098                                         /* Re-check 'propagate' for mo_graph edges */
2099                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2100                                 }
2101                         }
2102                 }
2103                 if (complete) {
2104                         it = pending_rel_seqs->erase(it);
2105                         snapshot_free(pending);
2106                 } else {
2107                         it++;
2108                 }
2109         }
2110
2111         // If we added any new synchronization, see if we have realized a data race.
2112         checkDataRaces();
2113
2114         return updated;
2115 }
2116
2117 /**
2118  * Performs various bookkeeping operations for the current ModelAction. For
2119  * instance, adds action to the per-object, per-thread action vector and to the
2120  * action trace list of all thread actions.
2121  *
2122  * @param act is the ModelAction to add.
2123  */
2124 void ModelChecker::add_action_to_lists(ModelAction *act)
2125 {
2126         int tid = id_to_int(act->get_tid());
2127         ModelAction *uninit = NULL;
2128         int uninit_id = -1;
2129         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2130         if (list->empty() && act->is_atomic_var()) {
2131                 uninit = new_uninitialized_action(act->get_location());
2132                 uninit_id = id_to_int(uninit->get_tid());
2133                 list->push_back(uninit);
2134         }
2135         list->push_back(act);
2136
2137         action_trace->push_back(act);
2138         if (uninit)
2139                 action_trace->push_front(uninit);
2140
2141         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2142         if (tid >= (int)vec->size())
2143                 vec->resize(priv->next_thread_id);
2144         (*vec)[tid].push_back(act);
2145         if (uninit)
2146                 (*vec)[uninit_id].push_front(uninit);
2147
2148         if ((int)thrd_last_action->size() <= tid)
2149                 thrd_last_action->resize(get_num_threads());
2150         (*thrd_last_action)[tid] = act;
2151         if (uninit)
2152                 (*thrd_last_action)[uninit_id] = uninit;
2153
2154         if (act->is_fence() && act->is_release()) {
2155                 if ((int)thrd_last_fence_release->size() <= tid)
2156                         thrd_last_fence_release->resize(get_num_threads());
2157                 (*thrd_last_fence_release)[tid] = act;
2158         }
2159
2160         if (act->is_wait()) {
2161                 void *mutex_loc = (void *) act->get_value();
2162                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2163
2164                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2165                 if (tid >= (int)vec->size())
2166                         vec->resize(priv->next_thread_id);
2167                 (*vec)[tid].push_back(act);
2168         }
2169 }
2170
2171 /**
2172  * @brief Get the last action performed by a particular Thread
2173  * @param tid The thread ID of the Thread in question
2174  * @return The last action in the thread
2175  */
2176 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2177 {
2178         int threadid = id_to_int(tid);
2179         if (threadid < (int)thrd_last_action->size())
2180                 return (*thrd_last_action)[threadid];
2181         else
2182                 return NULL;
2183 }
2184
2185 /**
2186  * @brief Get the last fence release performed by a particular Thread
2187  * @param tid The thread ID of the Thread in question
2188  * @return The last fence release in the thread, if one exists; NULL otherwise
2189  */
2190 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2191 {
2192         int threadid = id_to_int(tid);
2193         if (threadid < (int)thrd_last_fence_release->size())
2194                 return (*thrd_last_fence_release)[threadid];
2195         else
2196                 return NULL;
2197 }
2198
2199 /**
2200  * Gets the last memory_order_seq_cst write (in the total global sequence)
2201  * performed on a particular object (i.e., memory location), not including the
2202  * current action.
2203  * @param curr The current ModelAction; also denotes the object location to
2204  * check
2205  * @return The last seq_cst write
2206  */
2207 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2208 {
2209         void *location = curr->get_location();
2210         action_list_t *list = get_safe_ptr_action(obj_map, location);
2211         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2212         action_list_t::reverse_iterator rit;
2213         for (rit = list->rbegin(); rit != list->rend(); rit++)
2214                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2215                         return *rit;
2216         return NULL;
2217 }
2218
2219 /**
2220  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2221  * performed in a particular thread, prior to a particular fence.
2222  * @param tid The ID of the thread to check
2223  * @param before_fence The fence from which to begin the search; if NULL, then
2224  * search for the most recent fence in the thread.
2225  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2226  */
2227 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2228 {
2229         /* All fences should have NULL location */
2230         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2231         action_list_t::reverse_iterator rit = list->rbegin();
2232
2233         if (before_fence) {
2234                 for (; rit != list->rend(); rit++)
2235                         if (*rit == before_fence)
2236                                 break;
2237
2238                 ASSERT(*rit == before_fence);
2239                 rit++;
2240         }
2241
2242         for (; rit != list->rend(); rit++)
2243                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2244                         return *rit;
2245         return NULL;
2246 }
2247
2248 /**
2249  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2250  * location). This function identifies the mutex according to the current
2251  * action, which is presumed to operate on the same mutex.
2252  * @param curr The current ModelAction; also denotes the object location to
2253  * check
2254  * @return The last unlock operation
2255  */
2256 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2257 {
2258         void *location = curr->get_location();
2259         action_list_t *list = get_safe_ptr_action(obj_map, location);
2260         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2261         action_list_t::reverse_iterator rit;
2262         for (rit = list->rbegin(); rit != list->rend(); rit++)
2263                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2264                         return *rit;
2265         return NULL;
2266 }
2267
2268 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2269 {
2270         ModelAction *parent = get_last_action(tid);
2271         if (!parent)
2272                 parent = get_thread(tid)->get_creation();
2273         return parent;
2274 }
2275
2276 /**
2277  * Returns the clock vector for a given thread.
2278  * @param tid The thread whose clock vector we want
2279  * @return Desired clock vector
2280  */
2281 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2282 {
2283         return get_parent_action(tid)->get_cv();
2284 }
2285
2286 /**
2287  * Resolve a set of Promises with a current write. The set is provided in the
2288  * Node corresponding to @a write.
2289  * @param write The ModelAction that is fulfilling Promises
2290  * @return True if promises were resolved; false otherwise
2291  */
2292 bool ModelChecker::resolve_promises(ModelAction *write)
2293 {
2294         bool resolved = false;
2295         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2296
2297         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2298                 Promise *promise = (*promises)[promise_index];
2299                 if (write->get_node()->get_promise(i)) {
2300                         ModelAction *read = promise->get_action();
2301                         if (read->is_rmw()) {
2302                                 mo_graph->addRMWEdge(write, read);
2303                         }
2304                         read_from(read, write);
2305                         //First fix up the modification order for actions that happened
2306                         //before the read
2307                         r_modification_order(read, write);
2308                         //Next fix up the modification order for actions that happened
2309                         //after the read.
2310                         post_r_modification_order(read, write);
2311                         //Make sure the promise's value matches the write's value
2312                         ASSERT(promise->get_value() == write->get_value());
2313                         delete(promise);
2314
2315                         promises->erase(promises->begin() + promise_index);
2316                         threads_to_check.push_back(read->get_tid());
2317
2318                         resolved = true;
2319                 } else
2320                         promise_index++;
2321         }
2322
2323         //Check whether reading these writes has made threads unable to
2324         //resolve promises
2325
2326         for (unsigned int i = 0; i < threads_to_check.size(); i++)
2327                 mo_check_promises(threads_to_check[i], write);
2328
2329         return resolved;
2330 }
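/*
 * Illustrative sketch (hypothetical, not part of this file): promises arise
 * when a load is speculatively given the value of a store that has not yet
 * executed (a "future value"), e.g. the load-buffering shape
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   void thread1()
 *   {
 *           int r1 = x.load(std::memory_order_relaxed);  // may speculatively read 1
 *           y.store(1, std::memory_order_relaxed);
 *   }
 *
 *   void thread2()
 *   {
 *           int r2 = y.load(std::memory_order_relaxed);
 *           x.store(1, std::memory_order_relaxed);       // later fulfills the promise
 *   }
 *
 * When thread2's x.store(1) is eventually explored, its Node records which
 * outstanding promises it can satisfy; this method then binds the speculative
 * read to that write and patches the modification order both for actions that
 * preceded the read and (via post_r_modification_order) for those that
 * followed it.
 */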
2331
2332 /**
2333  * Compute the set of promises that could potentially be satisfied by this
2334  * action. Note that the set computation actually appears in the Node, not in
2335  * ModelChecker.
2336  * @param curr The ModelAction that may satisfy promises
2337  */
2338 void ModelChecker::compute_promises(ModelAction *curr)
2339 {
2340         for (unsigned int i = 0; i < promises->size(); i++) {
2341                 Promise *promise = (*promises)[i];
2342                 const ModelAction *act = promise->get_action();
2343                 if (!act->happens_before(curr) &&
2344                                 act->is_read() &&
2345                                 !act->could_synchronize_with(curr) &&
2346                                 !act->same_thread(curr) &&
2347                                 act->get_location() == curr->get_location() &&
2348                                 promise->get_value() == curr->get_value()) {
2349                         curr->get_node()->set_promise(i, act->is_rmw());
2350                 }
2351         }
2352 }
2353
2354 /** Checks promises in response to change in ClockVector Threads. */
2355 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2356 {
2357         for (unsigned int i = 0; i < promises->size(); i++) {
2358                 Promise *promise = (*promises)[i];
2359                 const ModelAction *act = promise->get_action();
2360                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2361                                 merge_cv->synchronized_since(act)) {
2362                         if (promise->increment_threads(tid)) {
2363                                 //Promise has failed
2364                                 priv->failed_promise = true;
2365                                 return;
2366                         }
2367                 }
2368         }
2369 }
2370
2371 void ModelChecker::check_promises_thread_disabled() {
2372         for (unsigned int i = 0; i < promises->size(); i++) {
2373                 Promise *promise = (*promises)[i];
2374                 if (promise->check_promise()) {
2375                         priv->failed_promise = true;
2376                         return;
2377                 }
2378         }
2379 }
2380
2381 /** Checks promises in response to addition to modification order for threads.
2382  * Definitions:
2383  * pthread is the thread that performed the read that created the promise
2384  *
2385  * pread is the read that created the promise
2386  *
2387  * pwrite is either the first write to the same location as pread by
2388  * pthread that is sequenced after pread, or the write read by the
2389  * first read to the same location as pread by pthread that is
2390  * sequenced after pread.
2391  *
2392  *      1. If tid=pthread, then we check what other threads are reachable
2393  * through the modification order starting with pwrite.  Those threads cannot
2394  * perform a write that will resolve the promise due to modification
2395  * order constraints.
2396  *
2397  * 2. If the tid is not pthread, we check whether pwrite can reach the
2398  * action write through the modification order.  If so, that thread
2399  * cannot perform a future write that will resolve the promise due to
2400  * modification order constraints.
2401  *
2402  *      @param tid The thread that either read from the model action
2403  *      write, or actually did the model action write.
2404  *
2405  *      @param write The ModelAction representing the relevant write.
2406  */
2407 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2408 {
2409         void *location = write->get_location();
2410         for (unsigned int i = 0; i < promises->size(); i++) {
2411                 Promise *promise = (*promises)[i];
2412                 const ModelAction *act = promise->get_action();
2413
2414                 //Is this promise on the same location?
2415                 if (act->get_location() != location)
2416                         continue;
2417
2418                 //same thread as the promise
2419                 if (act->get_tid() == tid) {
2420
2421                         //do we have a pwrite for the promise, if not, set it
2422                         if (promise->get_write() == NULL) {
2423                                 promise->set_write(write);
2424                                 //The pwrite cannot happen before the promise
2425                                 if (write->happens_before(act) && (write != act)) {
2426                                         priv->failed_promise = true;
2427                                         return;
2428                                 }
2429                         }
2430                         if (mo_graph->checkPromise(write, promise)) {
2431                                 priv->failed_promise = true;
2432                                 return;
2433                         }
2434                 }
2435
2436                 //Don't do any lookups twice for the same thread
2437                 if (promise->has_sync_thread(tid))
2438                         continue;
2439
2440                 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2441                         if (promise->increment_threads(tid)) {
2442                                 priv->failed_promise = true;
2443                                 return;
2444                         }
2445                 }
2446         }
2447 }
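/*
 * Illustrative sketch (hypothetical, not part of this file): suppose pthread
 * speculatively reads the future value 1 from location x (pread) and then
 * itself stores to x, making that store pwrite:
 *
 *   void pthread_func()                                  // pthread
 *   {
 *           int r = x.load(std::memory_order_relaxed);   // pread, promised value 1
 *           x.store(2, std::memory_order_relaxed);       // pwrite
 *   }
 *
 * A write w in another thread that is reachable from pwrite in the
 * modification order (pwrite --mo--> w) cannot be the store that fulfills the
 * promise: the fulfilling store must be modification-ordered before pwrite.
 * Once every other thread has been ruled out this way, the promise has failed
 * and failed_promise is set.
 */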
2448
2449 /**
2450  * Compute the set of writes that may break the current pending release
2451  * sequence. This information is extracted from previous release sequence
2452  * calculations.
2453  *
2454  * @param curr The current ModelAction. Must be a release sequence fixup
2455  * action.
2456  */
2457 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2458 {
2459         if (pending_rel_seqs->empty())
2460                 return;
2461
2462         struct release_seq *pending = pending_rel_seqs->back();
2463         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2464                 const ModelAction *write = pending->writes[i];
2465                 curr->get_node()->add_relseq_break(write);
2466         }
2467
2468         /* NULL means don't break the sequence; just synchronize */
2469         curr->get_node()->add_relseq_break(NULL);
2470 }
2471
2472 /**
2473  * Build up an initial set of all past writes that this 'read' action may read
2474  * from. This set is determined by the clock vector's "happens before"
2475  * relationship.
2476  * @param curr is the current ModelAction that we are exploring; it must be a
2477  * 'read' operation.
2478  */
2479 void ModelChecker::build_reads_from_past(ModelAction *curr)
2480 {
2481         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2482         unsigned int i;
2483         ASSERT(curr->is_read());
2484
2485         ModelAction *last_sc_write = NULL;
2486
2487         if (curr->is_seqcst())
2488                 last_sc_write = get_last_seq_cst_write(curr);
2489
2490         /* Iterate over all threads */
2491         for (i = 0; i < thrd_lists->size(); i++) {
2492                 /* Iterate over actions in thread, starting from most recent */
2493                 action_list_t *list = &(*thrd_lists)[i];
2494                 action_list_t::reverse_iterator rit;
2495                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2496                         ModelAction *act = *rit;
2497
2498                         /* Only consider 'write' actions */
2499                         if (!act->is_write() || act == curr)
2500                                 continue;
2501
2502                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2503                         bool allow_read = true;
2504
2505                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2506                                 allow_read = false;
2507                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2508                                 allow_read = false;
2509
2510                         if (allow_read)
2511                                 curr->get_node()->add_read_from(act);
2512
2513                         /* Include at most one act per-thread that "happens before" curr */
2514                         if (act->happens_before(curr))
2515                                 break;
2516                 }
2517         }
2518
2519         if (DBG_ENABLED()) {
2520                 model_print("Reached read action:\n");
2521                 curr->print();
2522                 model_print("Printing may_read_from\n");
2523                 curr->get_node()->print_may_read_from();
2524                 model_print("End printing may_read_from\n");
2525         }
2526 }
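
/*
 * Illustrative example for build_reads_from_past(): if thread 1 writes W1
 * and then W2 to location x, and thread 2 later reads x such that W2 (and
 * hence W1) happens before the read, then walking thread 1's list in
 * reverse adds W2 to may_read_from and stops at the happens-before check;
 * the hidden write W1 is never offered to the read.
 */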
2527
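/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Walks backward along the write's reads-from (RMW) chain: the read is
 * allowed if the chain reaches an uninitialized write, reaches a release
 * write performed while the reading thread was on the sleep set, or runs
 * off the recorded end of the chain; it is rejected as soon as a non-RMW
 * write is reached without satisfying either condition.
 *
 * @param curr The read action of the (sleeping) thread
 * @param write The write that curr might read from
 * @return True if the read should be allowed
 */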
2528 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2529 {
2530         while (true) {
2531                 /* UNINIT actions don't have a Node, and they never sleep */
2532                 if (write->is_uninitialized())
2533                         return true;
2534                 Node *prevnode = write->get_node()->get_parent();
2535
2536                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2537                 if (write->is_release() && thread_sleep)
2538                         return true;
2539                 if (!write->is_rmw()) {
2540                         return false;
2541                 }
2542                 if (write->get_reads_from() == NULL)
2543                         return true;
2544                 write = write->get_reads_from();
2545         }
2546 }
2547
2548 /**
2549  * @brief Create a new action representing an uninitialized atomic
2550  * @param location The memory location of the atomic object
2551  * @return A pointer to a new ModelAction
2552  */
2553 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2554 {
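        /* Construct in place over snapshot_malloc()'d storage so the action
         * lives in snapshotted memory and is rolled back between executions. */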
2555         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2556         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2557         act->create_cv(NULL);
2558         return act;
2559 }
2560
2561 static void print_list(action_list_t *list, int exec_num = -1)
2562 {
2563         action_list_t::iterator it;
2564
2565         model_print("---------------------------------------------------------------------\n");
2566         if (exec_num >= 0)
2567                 model_print("Execution %d:\n", exec_num);
2568
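        /* Fold each action's hash into a single fingerprint; the HASH line
         * below makes it easy to spot identical traces across runs. */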
2569         unsigned int hash = 0;
2570
2571         for (it = list->begin(); it != list->end(); it++) {
2572                 (*it)->print();
2573                 hash = hash^(hash<<3)^((*it)->hash());
2574         }
2575         model_print("HASH %u\n", hash);
2576         model_print("---------------------------------------------------------------------\n");
2577 }
2578
2579 #if SUPPORT_MOD_ORDER_DUMP
2580 void ModelChecker::dumpGraph(char *filename) const
2581 {
2582         char buffer[200];
2583         sprintf(buffer, "%s.dot", filename);
2584         FILE *file = fopen(buffer, "w");
             if (!file)
                     return;        /* cannot create the dump file; skip the graph dump */
2585         fprintf(file, "digraph %s {\n", filename);
2586         mo_graph->dumpNodes(file);
2587         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2588
2589         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2590                 ModelAction *action = *it;
2591                 if (action->is_read()) {
2592                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2593                         if (action->get_reads_from() != NULL)
2594                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2595                 }
2596                 if (thread_array[action->get_tid()] != NULL) {
2597                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2598                 }
2599
2600                 thread_array[action->get_tid()] = action;
2601         }
2602         fprintf(file, "}\n");
2603         model_free(thread_array);
2604         fclose(file);
2605 }
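
/*
 * Example output (illustrative; the modification-order nodes and edges
 * emitted by mo_graph->dumpNodes() are omitted): for a trace in which
 * action #2 of thread 2 reads from action #1 of thread 1, the file
 * resembles
 *
 *   digraph graph0001 {
 *     N2 [label="2, T2"];
 *     N1 -> N2[label="rf", color=red];
 *   }
 */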
2606 #endif
2607
2608 /** @brief Prints an execution trace summary. */
2609 void ModelChecker::print_summary() const
2610 {
2611 #if SUPPORT_MOD_ORDER_DUMP
2612         scheduler->print();
2613         char buffername[100];
2614         sprintf(buffername, "exec%04u", stats.num_total);
2615         mo_graph->dumpGraphToFile(buffername);
2616         sprintf(buffername, "graph%04u", stats.num_total);
2617         dumpGraph(buffername);
2618 #endif
2619
2620         if (!isfeasibleprefix())
2621                 model_print("INFEASIBLE EXECUTION!\n");
2622         print_list(action_trace, stats.num_total);
2623         model_print("\n");
2624 }
2625
2626 /**
2627  * Add a Thread to the system for the first time. Should only be called once
2628  * per thread.
2629  * @param t The Thread to add
2630  */
2631 void ModelChecker::add_thread(Thread *t)
2632 {
2633         thread_map->put(id_to_int(t->get_id()), t);
2634         scheduler->add_thread(t);
2635 }
2636
2637 /**
2638  * Removes a thread from the scheduler.
2639  * @param t The Thread to remove
2640  */
2641 void ModelChecker::remove_thread(Thread *t)
2642 {
2643         scheduler->remove_thread(t);
2644 }
2645
2646 /**
2647  * @brief Get a Thread reference by its ID
2648  * @param tid The Thread's ID
2649  * @return A Thread reference
2650  */
2651 Thread * ModelChecker::get_thread(thread_id_t tid) const
2652 {
2653         return thread_map->get(id_to_int(tid));
2654 }
2655
2656 /**
2657  * @brief Get a reference to the Thread in which a ModelAction was executed
2658  * @param act The ModelAction
2659  * @return A Thread reference
2660  */
2661 Thread * ModelChecker::get_thread(ModelAction *act) const
2662 {
2663         return get_thread(act->get_tid());
2664 }
2665
2666 /**
2667  * @brief Check if a Thread is currently enabled
2668  * @param t The Thread to check
2669  * @return True if the Thread is currently enabled
2670  */
2671 bool ModelChecker::is_enabled(Thread *t) const
2672 {
2673         return scheduler->is_enabled(t);
2674 }
2675
2676 /**
2677  * @brief Check if a Thread is currently enabled
2678  * @param tid The ID of the Thread to check
2679  * @return True if the Thread is currently enabled
2680  */
2681 bool ModelChecker::is_enabled(thread_id_t tid) const
2682 {
2683         return scheduler->is_enabled(tid);
2684 }
2685
2686 /**
2687  * Switch from a user-context to the "master thread" context (a.k.a. system
2688  * context). This switch is made with the intention of exploring a particular
2689  * model-checking action (described by a ModelAction object). Must be called
2690  * from a user-thread context.
2691  *
2692  * @param act The current action that will be explored. May be NULL only if
2693  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2694  * ModelChecker::has_asserted).
2695  * @return The value returned by the current action
2696  */
2697 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2698 {
2699         DBG();
2700         Thread *old = thread_current();
2701         set_current_action(act);
2702         old->set_state(THREAD_READY);
2703         if (Thread::swap(old, &system_context) < 0) {
2704                 perror("swap threads");
2705                 exit(EXIT_FAILURE);
2706         }
2707         return old->get_return_value();
2708 }
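
/*
 * Typical call site (sketch only; the real wrappers live in the C
 * interface and their names/signatures may differ): an intercepted atomic
 * load is packaged as a ModelAction and handed to the master context,
 * which returns the value the load should observe:
 *
 *   uint64_t model_read_action(void *obj, memory_order ord)
 *   {
 *           return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 */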
2709
2710 /**
2711  * Takes the next step in the execution, if possible.
2712  * @param curr The current step to take
2713  * @return True if a step was taken; false otherwise.
2714  */
2715 bool ModelChecker::take_step(ModelAction *curr)
2716 {
2717         if (has_asserted())
2718                 return false;
2719
2720         Thread *curr_thrd = get_thread(curr);
2721         ASSERT(curr_thrd->get_state() == THREAD_READY);
2722
2723         curr = check_current_action(curr);
2724
2725         /* Infeasible -> don't take any more steps */
2726         if (is_infeasible())
2727                 return false;
2728         else if (isfeasibleprefix() && have_bug_reports()) {
2729                 set_assert();
2730                 return false;
2731         }
2732
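        /* Enforce the optional bound on execution length (0 means unbounded) */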
2733         if (params.bound != 0)
2734                 if (priv->used_sequence_numbers > params.bound)
2735                         return false;
2736
2737         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2738                 scheduler->remove_thread(curr_thrd);
2739
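        /* Take the forced choice (replay/backtracking) if there is one;
         * otherwise let the scheduler pick among the enabled threads. */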
2740         Thread *next_thrd = get_next_thread(curr);
2741         next_thrd = scheduler->next_thread(next_thrd);
2742
2743         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2744                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2745
2746         /*
2747          * Launch end-of-execution release sequence fixups only when there are:
2748          *
2749          * (1) no more user threads to run (or when execution replay chooses
2750          *     the 'model_thread')
2751          * (2) pending release sequences
2752          * (3) pending assertions (i.e., data races)
2753          * (4) no pending promises
2754          */
2755         if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2756                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2757                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2758                                 pending_rel_seqs->size());
2759                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2760                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2761                                 model_thread);
2762                 set_current_action(fixup);
2763                 return true;
2764         }
2765
2766         /* next_thrd == NULL -> don't take any more steps */
2767         if (!next_thrd)
2768                 return false;
2769
2770         next_thrd->set_state(THREAD_RUNNING);
2771
2772         if (next_thrd->get_pending() != NULL) {
2773                 /* restart a pending action */
2774                 set_current_action(next_thrd->get_pending());
2775                 next_thrd->set_pending(NULL);
2776                 next_thrd->set_state(THREAD_READY);
2777                 return true;
2778         }
2779
2780         /* Return false only if swap fails with an error */
2781         return (Thread::swap(&system_context, next_thrd) == 0);
2782 }
2783
2784 /** Wrapper to run the user's main function, with appropriate arguments */
2785 void user_main_wrapper(void *)
2786 {
2787         user_main(model->params.argc, model->params.argv);
2788 }
2789
2790 /** @brief Run ModelChecker for the user program */
2791 void ModelChecker::run()
2792 {
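        /* Each iteration of this loop is one complete execution of the user
         * program; next_execution() returns true while more interleavings
         * remain to be explored. */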
2793         do {
2794                 thrd_t user_thread;
2795                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2796
2797                 add_thread(t);
2798
2799                 /* Run user thread up to its first action */
2800                 scheduler->next_thread(t);
2801                 Thread::swap(&system_context, t);
2802
2803                 /* Wait for all threads to complete */
2804                 while (take_step(priv->current_action));
2805         } while (next_execution());
2806
2807         print_stats();
2808 }