model: stop thread-trace search once edge is added
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4
5 #include "model.h"
6 #include "action.h"
7 #include "nodestack.h"
8 #include "schedule.h"
9 #include "snapshot-interface.h"
10 #include "common.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
13 #include "promise.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "output.h"
17
18 #define INITIAL_THREAD_ID       0
19
20 ModelChecker *model;
21
22 struct bug_message {
23         bug_message(const char *str) {
24                 const char *fmt = "  [BUG] %s\n";
25                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
26                 sprintf(msg, fmt, str);
27         }
28         ~bug_message() { if (msg) snapshot_free(msg); }
29
30         char *msg;
31         void print() { model_print("%s", msg); }
32
33         SNAPSHOTALLOC
34 };
35
36 /**
37  * Structure for holding small ModelChecker members that should be snapshotted
38  */
39 struct model_snapshot_members {
40         model_snapshot_members() :
41                 current_action(NULL),
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 nextThread(NULL),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         ModelAction *current_action;
62         unsigned int next_thread_id;
63         modelclock_t used_sequence_numbers;
64         Thread *nextThread;
65         ModelAction *next_backtrack;
66         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
67         struct execution_stats stats;
68         bool failed_promise;
69         bool too_many_reads;
70         /** @brief Incorrectly-ordered synchronization was made */
71         bool bad_synchronization;
72         bool asserted;
73
74         SNAPSHOTALLOC
75 };
76
77 /** @brief Constructor */
78 ModelChecker::ModelChecker(struct model_params params) :
79         /* Initialize default scheduler */
80         params(params),
81         scheduler(new Scheduler()),
82         diverge(NULL),
83         earliest_diverge(NULL),
84         action_trace(new action_list_t()),
85         thread_map(new HashTable<int, Thread *, int>()),
86         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
90         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
91         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
92         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
93         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
94         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
95         node_stack(new NodeStack()),
96         priv(new struct model_snapshot_members()),
97         mo_graph(new CycleGraph())
98 {
99         /* Initialize a model-checker thread, for special ModelActions */
100         model_thread = new Thread(get_next_id());
101         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
102 }
103
104 /** @brief Destructor */
105 ModelChecker::~ModelChecker()
106 {
107         for (unsigned int i = 0; i < get_num_threads(); i++)
108                 delete thread_map->get(i);
109         delete thread_map;
110
111         delete obj_thrd_map;
112         delete obj_map;
113         delete lock_waiters_map;
114         delete condvar_waiters_map;
115         delete action_trace;
116
117         for (unsigned int i = 0; i < promises->size(); i++)
118                 delete (*promises)[i];
119         delete promises;
120
121         delete pending_rel_seqs;
122
123         delete thrd_last_action;
124         delete thrd_last_fence_release;
125         delete node_stack;
126         delete scheduler;
127         delete mo_graph;
128         delete priv;
129 }
130
131 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
132         action_list_t * tmp=hash->get(ptr);
133         if (tmp==NULL) {
134                 tmp=new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
141         std::vector<action_list_t> * tmp=hash->get(ptr);
142         if (tmp==NULL) {
143                 tmp=new std::vector<action_list_t>();
144                 hash->put(ptr, tmp);
145         }
146         return tmp;
147 }
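/*
 * Illustrative use of the get-or-create helpers above (a sketch of the call
 * pattern used throughout this file, not additional checker logic): callers
 * never test for a missing per-location list themselves, they simply write
 *
 *     action_list_t *waiters =
 *             get_safe_ptr_action(lock_waiters_map, curr->get_location());
 *     waiters->push_back(curr);
 *
 * and the helper lazily allocates the list on first use.
 */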
148
149 /**
150  * Restores user program to initial state and resets all model-checker data
151  * structures.
152  */
153 void ModelChecker::reset_to_initial_state()
154 {
155         DEBUG("+++ Resetting to initial state +++\n");
156         node_stack->reset_execution();
157
158         /* Print all model-checker output before rollback */
159         fflush(model_out);
160
161         snapshotObject->backTrackBeforeStep(0);
162 }
163
164 /** @return a thread ID for a new Thread */
165 thread_id_t ModelChecker::get_next_id()
166 {
167         return priv->next_thread_id++;
168 }
169
170 /** @return the number of user threads created during this execution */
171 unsigned int ModelChecker::get_num_threads() const
172 {
173         return priv->next_thread_id;
174 }
175
176 /** @return The currently executing Thread. */
177 Thread * ModelChecker::get_current_thread() const
178 {
179         return scheduler->get_current_thread();
180 }
181
182 /** @return a sequence number for a new ModelAction */
183 modelclock_t ModelChecker::get_next_seq_num()
184 {
185         return ++priv->used_sequence_numbers;
186 }
187
188 Node * ModelChecker::get_curr_node() const
189 {
190         return node_stack->get_head();
191 }
192
193 /**
194  * @brief Choose the next thread to execute.
195  *
196  * This function chooses the next thread that should execute. It can force the
197  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
198  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
199  * The model-checker may have no preference regarding the next thread (i.e.,
200  * when exploring a new execution ordering), in which case this will return
201  * NULL.
202  * @param curr The current ModelAction. This action might guide the choice of
203  * next thread.
204  * @return The next thread to run. If the model-checker has no preference, NULL.
205  */
206 Thread * ModelChecker::get_next_thread(ModelAction *curr)
207 {
208         thread_id_t tid;
209
210         if (curr!=NULL) {
211                 /* Do not split atomic actions. */
212                 if (curr->is_rmwr())
213                         return thread_current();
214                 /* The THREAD_CREATE action points to the created Thread */
215                 else if (curr->get_type() == THREAD_CREATE)
216                         return (Thread *)curr->get_location();
217         }
218
219         /* Have we completed exploring the preselected path? */
220         if (diverge == NULL)
221                 return NULL;
222
223         /* Else, we are trying to replay an execution */
224         ModelAction *next = node_stack->get_next()->get_action();
225
226         if (next == diverge) {
227                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
228                         earliest_diverge=diverge;
229
230                 Node *nextnode = next->get_node();
231                 Node *prevnode = nextnode->get_parent();
232                 scheduler->update_sleep_set(prevnode);
233
234                 /* Reached divergence point */
235                 if (nextnode->increment_misc()) {
236                         /* The next node will try to satisfy a different misc_index value. */
237                         tid = next->get_tid();
238                         node_stack->pop_restofstack(2);
239                 } else if (nextnode->increment_promise()) {
240                         /* The next node will try to satisfy a different set of promises. */
241                         tid = next->get_tid();
242                         node_stack->pop_restofstack(2);
243                 } else if (nextnode->increment_read_from()) {
244                         /* The next node will read from a different value. */
245                         tid = next->get_tid();
246                         node_stack->pop_restofstack(2);
247                 } else if (nextnode->increment_future_value()) {
248                         /* The next node will try to read from a different future value. */
249                         tid = next->get_tid();
250                         node_stack->pop_restofstack(2);
251                 } else if (nextnode->increment_relseq_break()) {
252                         /* The next node will try to resolve a release sequence differently */
253                         tid = next->get_tid();
254                         node_stack->pop_restofstack(2);
255                 } else {
256                         /* Make a different thread execute for next step */
257                         scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
258                         tid = prevnode->get_next_backtrack();
259                         /* Make sure the backtracked thread isn't sleeping. */
260                         node_stack->pop_restofstack(1);
261                         if (diverge==earliest_diverge) {
262                                 earliest_diverge=prevnode->get_action();
263                         }
264                 }
265                 /* The correct sleep set is in the parent node. */
266                 execute_sleep_set();
267
268                 DEBUG("*** Divergence point ***\n");
269
270                 diverge = NULL;
271         } else {
272                 tid = next->get_tid();
273         }
274         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
275         ASSERT(tid != THREAD_ID_T_NONE);
276         return thread_map->get(id_to_int(tid));
277 }
278
279 /**
280  * We need to know what the next action of every thread in the sleep
281  * set will be. This method computes those actions and stores each one
282  * in the corresponding Thread object's pending-action slot.
283  */
284
285 void ModelChecker::execute_sleep_set() {
286         for(unsigned int i=0;i<get_num_threads();i++) {
287                 thread_id_t tid=int_to_id(i);
288                 Thread *thr=get_thread(tid);
289                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
290                                  thr->get_pending() == NULL ) {
291                         thr->set_state(THREAD_RUNNING);
292                         scheduler->next_thread(thr);
293                         Thread::swap(&system_context, thr);
294                         priv->current_action->set_sleep_flag();
295                         thr->set_pending(priv->current_action);
296                 }
297         }
298         priv->current_action = NULL;
299 }
300
301 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
302         for(unsigned int i=0;i<get_num_threads();i++) {
303                 thread_id_t tid=int_to_id(i);
304                 Thread *thr=get_thread(tid);
305                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
306                         ModelAction *pending_act=thr->get_pending();
307                         if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
308                                 //Remove this thread from sleep set
309                                 scheduler->remove_sleep(thr);
310                         }
311                 }
312         }
313 }
314
315 /** @brief Alert the model-checker that an incorrectly-ordered
316  * synchronization was made */
317 void ModelChecker::set_bad_synchronization()
318 {
319         priv->bad_synchronization = true;
320 }
321
322 bool ModelChecker::has_asserted() const
323 {
324         return priv->asserted;
325 }
326
327 void ModelChecker::set_assert()
328 {
329         priv->asserted = true;
330 }
331
332 /**
333  * Check if we are in a deadlock. Should only be called at the end of an
334  * execution, although it should not give false positives in the middle of an
335  * execution (there should be some ENABLED thread).
336  *
337  * @return True if program is in a deadlock; false otherwise
338  */
339 bool ModelChecker::is_deadlocked() const
340 {
341         bool blocking_threads = false;
342         for (unsigned int i = 0; i < get_num_threads(); i++) {
343                 thread_id_t tid = int_to_id(i);
344                 if (is_enabled(tid))
345                         return false;
346                 Thread *t = get_thread(tid);
347                 if (!t->is_model_thread() && t->get_pending())
348                         blocking_threads = true;
349         }
350         return blocking_threads;
351 }
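/*
 * Example of a user program this check would flag (a sketch, assuming the
 * checker's std::mutex replacement header; names are illustrative only):
 *
 *     std::mutex m1, m2;
 *     // Thread A              // Thread B
 *     m1.lock();               m2.lock();
 *     m2.lock();               m1.lock();   // both threads block on ATOMIC_LOCK
 *
 * With no thread enabled and both threads holding a pending lock action, the
 * execution ends and is_deadlocked() returns true.
 */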
352
353 /**
354  * Check if this is a complete execution. That is, have all threads completed
355  * execution (rather than exiting because sleep sets have forced a redundant
356  * execution).
357  *
358  * @return True if the execution is complete.
359  */
360 bool ModelChecker::is_complete_execution() const
361 {
362         for (unsigned int i = 0; i < get_num_threads(); i++)
363                 if (is_enabled(int_to_id(i)))
364                         return false;
365         return true;
366 }
367
368 /**
369  * @brief Assert a bug in the executing program.
370  *
371  * Use this function to assert any sort of bug in the user program. If the
372  * current trace is feasible (actually, a prefix of some feasible execution),
373  * then this execution will be aborted, printing the appropriate message. If
374  * the current trace is not yet feasible, the error message will be stashed and
375  * printed if the execution ever becomes feasible.
376  *
377  * @param msg Descriptive message for the bug (do not include newline char)
378  * @return True if bug is immediately-feasible
379  */
380 bool ModelChecker::assert_bug(const char *msg)
381 {
382         priv->bugs.push_back(new bug_message(msg));
383
384         if (isfeasibleprefix()) {
385                 set_assert();
386                 return true;
387         }
388         return false;
389 }
390
391 /**
392  * @brief Assert a bug in the executing program, asserted by a user thread
393  * @see ModelChecker::assert_bug
394  * @param msg Descriptive message for the bug (do not include newline char)
395  */
396 void ModelChecker::assert_user_bug(const char *msg)
397 {
398         /* If feasible bug, bail out now */
399         if (assert_bug(msg))
400                 switch_to_master(NULL);
401 }
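/*
 * Illustrative call sites (a sketch): the deadlock check later in this file
 * really does call assert_bug("Deadlock detected"), while the user-thread
 * variant is how instrumented runtime code could report a bug through the
 * global 'model' pointer:
 *
 *     model->assert_user_bug("invalid memory access");   // from a user thread
 *     assert_bug("Deadlock detected");                    // from the checker itself
 */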
402
403 /** @return True, if any bugs have been reported for this execution */
404 bool ModelChecker::have_bug_reports() const
405 {
406         return priv->bugs.size() != 0;
407 }
408
409 /** @brief Print bug report listing for this execution (if any bugs exist) */
410 void ModelChecker::print_bugs() const
411 {
412         if (have_bug_reports()) {
413                 model_print("Bug report: %zu bug%s detected\n",
414                                 priv->bugs.size(),
415                                 priv->bugs.size() > 1 ? "s" : "");
416                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
417                         priv->bugs[i]->print();
418         }
419 }
420
421 /**
422  * @brief Record end-of-execution stats
423  *
424  * Must be run when exiting an execution. Records various stats.
425  * @see struct execution_stats
426  */
427 void ModelChecker::record_stats()
428 {
429         stats.num_total++;
430         if (!isfeasibleprefix())
431                 stats.num_infeasible++;
432         else if (have_bug_reports())
433                 stats.num_buggy_executions++;
434         else if (is_complete_execution())
435                 stats.num_complete++;
436         else
437                 stats.num_redundant++;
438 }
439
440 /** @brief Print execution stats */
441 void ModelChecker::print_stats() const
442 {
443         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
444         model_print("Number of redundant executions: %d\n", stats.num_redundant);
445         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
446         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
447         model_print("Total executions: %d\n", stats.num_total);
448         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
449 }
450
451 /**
452  * @brief End-of-execution print
453  * @param printbugs Should any existing bugs be printed?
454  */
455 void ModelChecker::print_execution(bool printbugs) const
456 {
457         print_program_output();
458
459         if (DBG_ENABLED() || params.verbose) {
460                 model_print("Earliest divergence point since last feasible execution:\n");
461                 if (earliest_diverge)
462                         earliest_diverge->print();
463                 else
464                         model_print("(Not set)\n");
465
466                 model_print("\n");
467                 print_stats();
468         }
469
470         /* Don't print invalid bugs */
471         if (printbugs)
472                 print_bugs();
473
474         model_print("\n");
475         print_summary();
476 }
477
478 /**
479  * Queries the model-checker for more executions to explore and, if one
480  * exists, resets the model-checker state to execute a new execution.
481  *
482  * @return If there are more executions to explore, return true. Otherwise,
483  * return false.
484  */
485 bool ModelChecker::next_execution()
486 {
487         DBG();
488         /* Is this execution a feasible execution that's worth bug-checking? */
489         bool complete = isfeasibleprefix() && (is_complete_execution() ||
490                         have_bug_reports());
491
492         /* End-of-execution bug checks */
493         if (complete) {
494                 if (is_deadlocked())
495                         assert_bug("Deadlock detected");
496
497                 checkDataRaces();
498         }
499
500         record_stats();
501
502         /* Output */
503         if (DBG_ENABLED() || params.verbose || have_bug_reports())
504                 print_execution(complete);
505         else
506                 clear_program_output();
507
508         if (complete)
509                 earliest_diverge = NULL;
510
511         if ((diverge = get_next_backtrack()) == NULL)
512                 return false;
513
514         if (DBG_ENABLED()) {
515                 model_print("Next execution will diverge at:\n");
516                 diverge->print();
517         }
518
519         reset_to_initial_state();
520         return true;
521 }
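/*
 * Sketch of the exploration driver that sits on top of this method (the real
 * entry point lives outside this file and may differ; run_user_program() is a
 * hypothetical helper standing in for one execution of the test program):
 *
 *     do {
 *             run_user_program();
 *     } while (model->next_execution());
 *
 * next_execution() returns false once get_next_backtrack() yields no further
 * divergence point, i.e. the search space is exhausted.
 */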
522
523 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 {
525         switch (act->get_type()) {
526         case ATOMIC_FENCE:
527         case ATOMIC_READ:
528         case ATOMIC_WRITE:
529         case ATOMIC_RMW: {
530                 /* Optimization: relaxed operations don't need backtracking */
531                 if (act->is_relaxed())
532                         return NULL;
533                 /* linear search: from most recent to oldest */
534                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
535                 action_list_t::reverse_iterator rit;
536                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
537                         ModelAction *prev = *rit;
538                         if (prev->could_synchronize_with(act))
539                                 return prev;
540                 }
541                 break;
542         }
543         case ATOMIC_LOCK:
544         case ATOMIC_TRYLOCK: {
545                 /* linear search: from most recent to oldest */
546                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
547                 action_list_t::reverse_iterator rit;
548                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
549                         ModelAction *prev = *rit;
550                         if (act->is_conflicting_lock(prev))
551                                 return prev;
552                 }
553                 break;
554         }
555         case ATOMIC_UNLOCK: {
556                 /* linear search: from most recent to oldest */
557                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
558                 action_list_t::reverse_iterator rit;
559                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
560                         ModelAction *prev = *rit;
561                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
562                                 return prev;
563                 }
564                 break;
565         }
566         case ATOMIC_WAIT: {
567                 /* linear search: from most recent to oldest */
568                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
569                 action_list_t::reverse_iterator rit;
570                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
571                         ModelAction *prev = *rit;
572                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
573                                 return prev;
574                         if (!act->same_thread(prev)&&prev->is_notify())
575                                 return prev;
576                 }
577                 break;
578         }
579
580         case ATOMIC_NOTIFY_ALL:
581         case ATOMIC_NOTIFY_ONE: {
582                 /* linear search: from most recent to oldest */
583                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
584                 action_list_t::reverse_iterator rit;
585                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
586                         ModelAction *prev = *rit;
587                         if (!act->same_thread(prev)&&prev->is_wait())
588                                 return prev;
589                 }
590                 break;
591         }
592         default:
593                 break;
594         }
595         return NULL;
596 }
597
598 /** This method finds the backtracking points against which we should try
599  * to reorder the given ModelAction.
600  *
601  * @param act The ModelAction for which to find backtracking points.
602  */
603 void ModelChecker::set_backtracking(ModelAction *act)
604 {
605         Thread *t = get_thread(act);
606         ModelAction * prev = get_last_conflict(act);
607         if (prev == NULL)
608                 return;
609
610         Node * node = prev->get_node()->get_parent();
611
612         int low_tid, high_tid;
613         if (node->is_enabled(t)) {
614                 low_tid = id_to_int(act->get_tid());
615                 high_tid = low_tid+1;
616         } else {
617                 low_tid = 0;
618                 high_tid = get_num_threads();
619         }
620
621         for(int i = low_tid; i < high_tid; i++) {
622                 thread_id_t tid = int_to_id(i);
623
624                 /* Make sure this thread can be enabled here. */
625                 if (i >= node->get_num_threads())
626                         break;
627
628                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
629                 if (node->enabled_status(tid)!=THREAD_ENABLED)
630                         continue;
631
632                 /* Check if this has been explored already */
633                 if (node->has_been_explored(tid))
634                         continue;
635
636                 /* See if fairness allows */
637                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
638                         bool unfair=false;
639                         for(int t=0;t<node->get_num_threads();t++) {
640                                 thread_id_t tother=int_to_id(t);
641                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
642                                         unfair=true;
643                                         break;
644                                 }
645                         }
646                         if (unfair)
647                                 continue;
648                 }
649                 /* Cache the latest backtracking point */
650                 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
651                         priv->next_backtrack = prev;
652
653                 /* If this is a new backtracking point, mark the tree */
654                 if (!node->set_backtrack(tid))
655                         continue;
656                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
657                                         id_to_int(prev->get_tid()),
658                                         id_to_int(t->get_id()));
659                 if (DBG_ENABLED()) {
660                         prev->print();
661                         act->print();
662                 }
663         }
664 }
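/*
 * Example of the kind of conflict that creates a backtracking point (an
 * illustrative sketch): a release store in one thread and an acquire load of
 * the same atomic location in another, e.g.
 *
 *     // Thread 1                           // Thread 2
 *     x.store(1, memory_order_release);     x.load(memory_order_acquire);
 *
 * get_last_conflict() finds the earlier of the two, and set_backtrack() marks
 * the other thread at that node so the opposite ordering is explored in a
 * later execution.
 */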
665
666 /**
667  * Returns last backtracking point. The model checker will explore a different
668  * path for this point in the next execution.
669  * @return The ModelAction at which the next execution should diverge.
670  */
671 ModelAction * ModelChecker::get_next_backtrack()
672 {
673         ModelAction *next = priv->next_backtrack;
674         priv->next_backtrack = NULL;
675         return next;
676 }
677
678 /**
679  * Processes a read or RMW model action.
680  * @param curr The read model action to process.
681  * @param second_part_of_rmw True if this is the second action of an RMW; false otherwise.
682  * @return True if processing this read updates the mo_graph.
683  */
684 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
685 {
686         uint64_t value = VALUE_NONE;
687         bool updated = false;
688         while (true) {
689                 const ModelAction *reads_from = curr->get_node()->get_read_from();
690                 if (reads_from != NULL) {
691                         mo_graph->startChanges();
692
693                         value = reads_from->get_value();
694                         bool r_status = false;
695
696                         if (!second_part_of_rmw) {
697                                 check_recency(curr, reads_from);
698                                 r_status = r_modification_order(curr, reads_from);
699                         }
700
701
702                         if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
703                                 mo_graph->rollbackChanges();
704                                 priv->too_many_reads = false;
705                                 continue;
706                         }
707
708                         read_from(curr, reads_from);
709                         mo_graph->commitChanges();
710                         mo_check_promises(curr->get_tid(), reads_from);
711
712                         updated |= r_status;
713                 } else if (!second_part_of_rmw) {
714                         /* Read from future value */
715                         value = curr->get_node()->get_future_value();
716                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
717                         read_from(curr, NULL);
718                         Promise *valuepromise = new Promise(curr, value, expiration);
719                         promises->push_back(valuepromise);
720                 }
721                 get_thread(curr)->set_return_value(value);
722                 return updated;
723         }
724 }
725
726 /**
727  * Processes a lock, trylock, unlock, wait, or notify model action.
728  * @param curr The mutex operation to process.
729  *
730  * The trylock operation checks whether the lock is taken. If not, it
731  * falls through to the normal lock case. If so, the trylock fails and
732  * the thread's return value is set to 0.
733  *
734  * The lock operation has already been checked to be enabled, so
735  * it just grabs the lock and synchronizes with the previous unlock.
736  *
737  * The unlock operation has to re-enable all of the threads that are
738  * waiting on the lock.
739  *
740  * @return True if synchronization was updated; false otherwise
741  */
742 bool ModelChecker::process_mutex(ModelAction *curr) {
743         std::mutex *mutex=NULL;
744         struct std::mutex_state *state=NULL;
745
746         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
747                 mutex = (std::mutex *)curr->get_location();
748                 state = mutex->get_state();
749         } else if(curr->is_wait()) {
750                 mutex = (std::mutex *)curr->get_value();
751                 state = mutex->get_state();
752         }
753
754         switch (curr->get_type()) {
755         case ATOMIC_TRYLOCK: {
756                 bool success = !state->islocked;
757                 curr->set_try_lock(success);
758                 if (!success) {
759                         get_thread(curr)->set_return_value(0);
760                         break;
761                 }
762                 get_thread(curr)->set_return_value(1);
763         }
764                 //otherwise fall through to the lock case
765         case ATOMIC_LOCK: {
766                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
767                         assert_bug("Lock access before initialization");
768                 state->islocked = true;
769                 ModelAction *unlock = get_last_unlock(curr);
770                 //synchronize with the previous unlock statement
771                 if (unlock != NULL) {
772                         curr->synchronize_with(unlock);
773                         return true;
774                 }
775                 break;
776         }
777         case ATOMIC_UNLOCK: {
778                 //unlock the lock
779                 state->islocked = false;
780                 //wake up the other threads
781                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
782                 //activate all the waiting threads
783                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
784                         scheduler->wake(get_thread(*rit));
785                 }
786                 waiters->clear();
787                 break;
788         }
789         case ATOMIC_WAIT: {
790                 //unlock the lock
791                 state->islocked = false;
792                 //wake up the other threads
793                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
794                 //activate all the waiting threads
795                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
796                         scheduler->wake(get_thread(*rit));
797                 }
798                 waiters->clear();
799                 //check whether we should go to sleep or not... simulate spurious wake-ups
800                 if (curr->get_node()->get_misc()==0) {
801                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
802                         //disable us
803                         scheduler->sleep(get_current_thread());
804                 }
805                 break;
806         }
807         case ATOMIC_NOTIFY_ALL: {
808                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
809                 //activate all the waiting threads
810                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
811                         scheduler->wake(get_thread(*rit));
812                 }
813                 waiters->clear();
814                 break;
815         }
816         case ATOMIC_NOTIFY_ONE: {
817                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
818                 int wakeupthread=curr->get_node()->get_misc();
819                 action_list_t::iterator it = waiters->begin();
820                 advance(it, wakeupthread);
821                 scheduler->wake(get_thread(*it));
822                 waiters->erase(it);
823                 break;
824         }
825
826         default:
827                 ASSERT(0);
828         }
829         return false;
830 }
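/*
 * User-level pattern exercising the wait/notify cases above (a sketch,
 * assuming the checker's std::mutex / std::condition_variable replacement
 * headers; exact wait() signatures may differ from the C++ standard):
 *
 *     std::mutex m;
 *     std::condition_variable cv;
 *     // Waiter                        // Notifier
 *     m.lock();                        m.lock();
 *     cv.wait(m);   // ATOMIC_WAIT     cv.notify_one();  // ATOMIC_NOTIFY_ONE
 *     m.unlock();                      m.unlock();
 *
 * ATOMIC_WAIT releases the mutex and, unless the misc counter models a
 * spurious wake-up, parks the waiter on condvar_waiters_map until a notify
 * wakes it.
 */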
831
832 /**
833  * Process a write ModelAction
834  * @param curr The ModelAction to process
835  * @return True if the mo_graph was updated or promises were resolved
836  */
837 bool ModelChecker::process_write(ModelAction *curr)
838 {
839         bool updated_mod_order = w_modification_order(curr);
840         bool updated_promises = resolve_promises(curr);
841
842         if (promises->size() == 0) {
843                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
844                         struct PendingFutureValue pfv = (*futurevalues)[i];
845                         //Do more ambitious checks now that mo is more complete
846                         if (mo_may_allow(pfv.writer, pfv.act)&&
847                                         pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
848                                         (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
849                                 priv->next_backtrack = pfv.act;
850                 }
851                 futurevalues->resize(0);
852         }
853
854         mo_graph->commitChanges();
855         mo_check_promises(curr->get_tid(), curr);
856
857         get_thread(curr)->set_return_value(VALUE_NONE);
858         return updated_mod_order || updated_promises;
859 }
860
861 /**
862  * Process a fence ModelAction
863  * @param curr The ModelAction to process
864  * @return True if synchronization was updated
865  */
866 bool ModelChecker::process_fence(ModelAction *curr)
867 {
868         /*
869          * fence-relaxed: no-op
870  * fence-release: only log the occurrence (not in this function), for
871          *   use in later synchronization
872          * fence-acquire (this function): search for hypothetical release
873          *   sequences
874          */
875         bool updated = false;
876         if (curr->is_acquire()) {
877                 action_list_t *list = action_trace;
878                 action_list_t::reverse_iterator rit;
879                 /* Find X : is_read(X) && X --sb-> curr */
880                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
881                         ModelAction *act = *rit;
882                         if (act == curr)
883                                 continue;
884                         if (act->get_tid() != curr->get_tid())
885                                 continue;
886                         /* Stop at the beginning of the thread */
887                         if (act->is_thread_start())
888                                 break;
889                         /* Stop once we reach a prior fence-acquire */
890                         if (act->is_fence() && act->is_acquire())
891                                 break;
892                         if (!act->is_read())
893                                 continue;
894                         /* read-acquire will find its own release sequences */
895                         if (act->is_acquire())
896                                 continue;
897
898                         /* Establish hypothetical release sequences */
899                         rel_heads_list_t release_heads;
900                         get_release_seq_heads(curr, act, &release_heads);
901                         for (unsigned int i = 0; i < release_heads.size(); i++)
902                                 if (!curr->synchronize_with(release_heads[i]))
903                                         set_bad_synchronization();
904                         if (release_heads.size() != 0)
905                                 updated = true;
906                 }
907         }
908         return updated;
909 }
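/*
 * The hypothetical release sequences searched for above correspond to the
 * standard fence-acquire idiom (an illustrative C++11 sketch, not checker
 * code):
 *
 *     // Thread 1                          // Thread 2
 *     data = 42;                           while (!flag.load(memory_order_relaxed))
 *     flag.store(true,                             ;
 *                memory_order_release);    atomic_thread_fence(memory_order_acquire);
 *                                          assert(data == 42);
 *
 * The relaxed load alone establishes no synchronization; this function walks
 * backwards over such prior reads in the same thread and synchronizes the
 * acquire fence with the release heads they read from.
 */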
910
911 /**
912  * @brief Process the current action for thread-related activity
913  *
914  * Performs current-action processing for a THREAD_* ModelAction. Processing
915  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
916  * synchronization, etc.  This function is a no-op for non-THREAD actions
917  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
918  *
919  * @param curr The current action
920  * @return True if synchronization was updated or a thread completed
921  */
922 bool ModelChecker::process_thread_action(ModelAction *curr)
923 {
924         bool updated = false;
925
926         switch (curr->get_type()) {
927         case THREAD_CREATE: {
928                 Thread *th = (Thread *)curr->get_location();
929                 th->set_creation(curr);
930                 break;
931         }
932         case THREAD_JOIN: {
933                 Thread *blocking = (Thread *)curr->get_location();
934                 ModelAction *act = get_last_action(blocking->get_id());
935                 curr->synchronize_with(act);
936                 updated = true; /* trigger rel-seq checks */
937                 break;
938         }
939         case THREAD_FINISH: {
940                 Thread *th = get_thread(curr);
941                 while (!th->wait_list_empty()) {
942                         ModelAction *act = th->pop_wait_list();
943                         scheduler->wake(get_thread(act));
944                 }
945                 th->complete();
946                 updated = true; /* trigger rel-seq checks */
947                 break;
948         }
949         case THREAD_START: {
950                 check_promises(curr->get_tid(), NULL, curr->get_cv());
951                 break;
952         }
953         default:
954                 break;
955         }
956
957         return updated;
958 }
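/*
 * For example, a join in the user program (a sketch, assuming the checker's
 * C11-style threads.h wrappers; exact signatures may differ):
 *
 *     thrd_t child;
 *     thrd_create(&child, worker, NULL);
 *     thrd_join(child);     // THREAD_JOIN: synchronizes with the child's
 *                           // last action (its THREAD_FINISH)
 *
 * makes every write the child performed happen-before the code after the join.
 */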
959
960 /**
961  * @brief Process the current action for release sequence fixup activity
962  *
963  * Performs model-checker release sequence fixups for the current action,
964  * forcing a single pending release sequence to break (with a given, potential
965  * "loose" write) or to complete (i.e., synchronize). If a pending release
966  * sequence forms a complete release sequence, then we must perform the fixup
967  * synchronization, mo_graph additions, etc.
968  *
969  * @param curr The current action; must be a release sequence fixup action
970  * @param work_queue The work queue to which to add work items as they are
971  * generated
972  */
973 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
974 {
975         const ModelAction *write = curr->get_node()->get_relseq_break();
976         struct release_seq *sequence = pending_rel_seqs->back();
977         pending_rel_seqs->pop_back();
978         ASSERT(sequence);
979         ModelAction *acquire = sequence->acquire;
980         const ModelAction *rf = sequence->rf;
981         const ModelAction *release = sequence->release;
982         ASSERT(acquire);
983         ASSERT(release);
984         ASSERT(rf);
985         ASSERT(release->same_thread(rf));
986
987         if (write == NULL) {
988                 /**
989                  * @todo Forcing a synchronization requires that we set
990                  * modification order constraints. For instance, we can't allow
991                  * a fixup sequence in which two separate read-acquire
992                  * operations read from the same sequence, where the first one
993                  * synchronizes and the other doesn't. Essentially, we can't
994                  * allow any writes to insert themselves between 'release' and
995                  * 'rf'
996                  */
997
998                 /* Must synchronize */
999                 if (!acquire->synchronize_with(release)) {
1000                         set_bad_synchronization();
1001                         return;
1002                 }
1003                 /* Re-check all pending release sequences */
1004                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1005                 /* Re-check act for mo_graph edges */
1006                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1007
1008                 /* propagate synchronization to later actions */
1009                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1010                 for (; (*rit) != acquire; rit++) {
1011                         ModelAction *propagate = *rit;
1012                         if (acquire->happens_before(propagate)) {
1013                                 propagate->synchronize_with(acquire);
1014                                 /* Re-check 'propagate' for mo_graph edges */
1015                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1016                         }
1017                 }
1018         } else {
1019                 /* Break release sequence with new edges:
1020                  *   release --mo--> write --mo--> rf */
1021                 mo_graph->addEdge(release, write);
1022                 mo_graph->addEdge(write, rf);
1023         }
1024
1025         /* See if we have realized a data race */
1026         checkDataRaces();
1027 }
1028
1029 /**
1030  * Initialize the current action by performing one or more of the following
1031  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1032  * in the NodeStack, manipulating backtracking sets, allocating and
1033  * initializing clock vectors, and computing the promises to fulfill.
1034  *
1035  * @param curr The current action, as passed from the user context; may be
1036  * freed/invalidated after the execution of this function, with a different
1037  * action "returned" in its place (pass-by-reference)
1038  * @return True if curr is a newly-explored action; false otherwise
1039  */
1040 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1041 {
1042         ModelAction *newcurr;
1043
1044         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1045                 newcurr = process_rmw(*curr);
1046                 delete *curr;
1047
1048                 if (newcurr->is_rmw())
1049                         compute_promises(newcurr);
1050
1051                 *curr = newcurr;
1052                 return false;
1053         }
1054
1055         (*curr)->set_seq_number(get_next_seq_num());
1056
1057         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1058         if (newcurr) {
1059                 /* First restore type and order in case of RMW operation */
1060                 if ((*curr)->is_rmwr())
1061                         newcurr->copy_typeandorder(*curr);
1062
1063                 ASSERT((*curr)->get_location() == newcurr->get_location());
1064                 newcurr->copy_from_new(*curr);
1065
1066                 /* Discard duplicate ModelAction; use action from NodeStack */
1067                 delete *curr;
1068
1069                 /* Always compute new clock vector */
1070                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1071
1072                 *curr = newcurr;
1073                 return false; /* Action was explored previously */
1074         } else {
1075                 newcurr = *curr;
1076
1077                 /* Always compute new clock vector */
1078                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1079
1080                 /* Assign most recent release fence */
1081                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1082
1083                 /*
1084                  * Perform one-time actions when pushing new ModelAction onto
1085                  * NodeStack
1086                  */
1087                 if (newcurr->is_write())
1088                         compute_promises(newcurr);
1089                 else if (newcurr->is_relseq_fixup())
1090                         compute_relseq_breakwrites(newcurr);
1091                 else if (newcurr->is_wait())
1092                         newcurr->get_node()->set_misc_max(2);
1093                 else if (newcurr->is_notify_one()) {
1094                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1095                 }
1096                 return true; /* This was a new ModelAction */
1097         }
1098 }
1099
1100 /**
1101  * @brief Establish reads-from relation between two actions
1102  *
1103  * Perform basic operations involved with establishing a concrete rf relation,
1104  * including setting the ModelAction data and checking for release sequences.
1105  *
1106  * @param act The action that is reading (must be a read)
1107  * @param rf The action from which we are reading (must be a write)
1108  *
1109  * @return True if this read established synchronization
1110  */
1111 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1112 {
1113         act->set_read_from(rf);
1114         if (rf != NULL && act->is_acquire()) {
1115                 rel_heads_list_t release_heads;
1116                 get_release_seq_heads(act, act, &release_heads);
1117                 int num_heads = release_heads.size();
1118                 for (unsigned int i = 0; i < release_heads.size(); i++)
1119                         if (!act->synchronize_with(release_heads[i])) {
1120                                 set_bad_synchronization();
1121                                 num_heads--;
1122                         }
1123                 return num_heads > 0;
1124         }
1125         return false;
1126 }
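/*
 * Concretely (an illustrative sketch): when an acquire load reads from a
 * release store,
 *
 *     // Writer                             // Reader
 *     data = 1;                             if (x.load(memory_order_acquire) == 1)
 *     x.store(1, memory_order_release);             r = data;   // guaranteed to see 1
 *
 * read_from() records the rf edge and then synchronizes the reader with the
 * release head(s) of that store's release sequence.
 */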
1127
1128 /**
1129  * @brief Check whether a model action is enabled.
1130  *
1131  * Checks whether a lock or join operation would succeed (i.e., the lock is
1132  * not already held, or the joined thread has already completed). If it would
1133  * not, the action is placed on the appropriate waiter list.
1134  *
1135  * @param curr is the ModelAction to check whether it is enabled.
1136  * @return a bool that indicates whether the action is enabled.
1137  */
1138 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1139         if (curr->is_lock()) {
1140                 std::mutex * lock = (std::mutex *)curr->get_location();
1141                 struct std::mutex_state * state = lock->get_state();
1142                 if (state->islocked) {
1143                         //Stick the action in the appropriate waiting queue
1144                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1145                         return false;
1146                 }
1147         } else if (curr->get_type() == THREAD_JOIN) {
1148                 Thread *blocking = (Thread *)curr->get_location();
1149                 if (!blocking->is_complete()) {
1150                         blocking->push_wait_list(curr);
1151                         return false;
1152                 }
1153         }
1154
1155         return true;
1156 }
1157
1158 /**
1159  * Stores the ModelAction for the current thread action.  Call this
1160  * immediately before switching from user- to system-context to pass
1161  * data between them.
1162  * @param act The ModelAction created by the user-thread action
1163  */
1164 void ModelChecker::set_current_action(ModelAction *act) {
1165         priv->current_action = act;
1166 }
1167
1168 /**
1169  * This is the heart of the model checker routine. It performs model-checking
1170  * actions corresponding to a given "current action." Among other processes, it
1171  * calculates reads-from relationships, updates synchronization clock vectors,
1172  * forms a memory_order constraints graph, and handles replay/backtrack
1173  * execution when running permutations of previously-observed executions.
1174  *
1175  * @param curr The current action to process
1176  * @return The next Thread that must be executed. May be NULL if ModelChecker
1177  * makes no choice (e.g., according to replay execution, combining RMW actions,
1178  * etc.)
1179  */
1180 Thread * ModelChecker::check_current_action(ModelAction *curr)
1181 {
1182         ASSERT(curr);
1183         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1184
1185         if (!check_action_enabled(curr)) {
1186                 /* Make the execution look like we chose to run this action
1187                  * much later, when a lock/join can succeed */
1188                 get_current_thread()->set_pending(curr);
1189                 scheduler->sleep(get_current_thread());
1190                 return get_next_thread(NULL);
1191         }
1192
1193         bool newly_explored = initialize_curr_action(&curr);
1194
1195         wake_up_sleeping_actions(curr);
1196
1197         /* Add the action to lists before any other model-checking tasks */
1198         if (!second_part_of_rmw)
1199                 add_action_to_lists(curr);
1200
1201         /* Build may_read_from set for newly-created actions */
1202         if (newly_explored && curr->is_read())
1203                 build_reads_from_past(curr);
1204
1205         /* Initialize work_queue with the "current action" work */
1206         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1207         while (!work_queue.empty() && !has_asserted()) {
1208                 WorkQueueEntry work = work_queue.front();
1209                 work_queue.pop_front();
1210
1211                 switch (work.type) {
1212                 case WORK_CHECK_CURR_ACTION: {
1213                         ModelAction *act = work.action;
1214                         bool update = false; /* update this location's release seq's */
1215                         bool update_all = false; /* update all release seq's */
1216
1217                         if (process_thread_action(curr))
1218                                 update_all = true;
1219
1220                         if (act->is_read() && process_read(act, second_part_of_rmw))
1221                                 update = true;
1222
1223                         if (act->is_write() && process_write(act))
1224                                 update = true;
1225
1226                         if (act->is_fence() && process_fence(act))
1227                                 update_all = true;
1228
1229                         if (act->is_mutex_op() && process_mutex(act))
1230                                 update_all = true;
1231
1232                         if (act->is_relseq_fixup())
1233                                 process_relseq_fixup(curr, &work_queue);
1234
1235                         if (update_all)
1236                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1237                         else if (update)
1238                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1239                         break;
1240                 }
1241                 case WORK_CHECK_RELEASE_SEQ:
1242                         resolve_release_sequences(work.location, &work_queue);
1243                         break;
1244                 case WORK_CHECK_MO_EDGES: {
1245                         /** @todo Complete verification of work_queue */
1246                         ModelAction *act = work.action;
1247                         bool updated = false;
1248
1249                         if (act->is_read()) {
1250                                 const ModelAction *rf = act->get_reads_from();
1251                                 if (rf != NULL && r_modification_order(act, rf))
1252                                         updated = true;
1253                         }
1254                         if (act->is_write()) {
1255                                 if (w_modification_order(act))
1256                                         updated = true;
1257                         }
1258                         mo_graph->commitChanges();
1259
1260                         if (updated)
1261                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1262                         break;
1263                 }
1264                 default:
1265                         ASSERT(false);
1266                         break;
1267                 }
1268         }
1269
1270         check_curr_backtracking(curr);
1271         set_backtracking(curr);
1272         return get_next_thread(curr);
1273 }
1274
1275 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1276         Node *currnode = curr->get_node();
1277         Node *parnode = currnode->get_parent();
1278
1279         if ((!parnode->backtrack_empty() ||
1280                          !currnode->misc_empty() ||
1281                          !currnode->read_from_empty() ||
1282                          !currnode->future_value_empty() ||
1283                          !currnode->promise_empty() ||
1284                          !currnode->relseq_break_empty())
1285                         && (!priv->next_backtrack ||
1286                                         *curr > *priv->next_backtrack)) {
1287                 priv->next_backtrack = curr;
1288         }
1289 }
1290
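/**
 * @brief Check whether any outstanding promise has expired
 * @return True if some promised future value was not written by its
 * expiration sequence number, rendering the execution infeasible
 */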
1291 bool ModelChecker::promises_expired() const
1292 {
1293         for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1294                 Promise *promise = (*promises)[promise_index];
1295                 if (promise->get_expiration()<priv->used_sequence_numbers) {
1296                         return true;
1297                 }
1298         }
1299         return false;
1300 }
1301
1302 /**
1303  * This is the strongest feasibility check available.
1304  * @return whether the current trace (partial or complete) must be a prefix of
1305  * a feasible trace.
1306  */
1307 bool ModelChecker::isfeasibleprefix() const
1308 {
1309         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1310 }
1311
1312 /**
1313  * Returns whether the current completed trace is feasible, except for pending
1314  * release sequences.
1315  */
1316 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1317 {
1318         if (DBG_ENABLED() && promises->size() != 0)
1319                 DEBUG("Infeasible: unresolved promises\n");
1320
1321         return !is_infeasible() && promises->size() == 0;
1322 }
1323
1324 /**
1325  * Check if the current partial trace is infeasible. Does not check any
1326  * end-of-execution flags, which might rule out the execution. Thus, this is
1327  * useful only for ruling an execution as infeasible.
1328  * @return whether the current partial trace is infeasible.
1329  */
1330 bool ModelChecker::is_infeasible() const
1331 {
1332         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1333                 DEBUG("Infeasible: RMW violation\n");
1334
1335         return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1336 }
1337
1338 /**
1339  * Check if the current partial trace is infeasible, while ignoring
1340  * infeasibility related to two RMWs reading from the same store. It does not
1341  * check end-of-execution feasibility.
1342  * @see ModelChecker::is_infeasible
1343  * @return whether the current partial trace is infeasible, ignoring multiple
1344  * RMWs reading from the same store.
1345  */
1346 bool ModelChecker::is_infeasible_ignoreRMW() const
1347 {
1348         if (DBG_ENABLED()) {
1349                 if (mo_graph->checkForCycles())
1350                         DEBUG("Infeasible: modification order cycles\n");
1351                 if (priv->failed_promise)
1352                         DEBUG("Infeasible: failed promise\n");
1353                 if (priv->too_many_reads)
1354                         DEBUG("Infeasible: too many reads\n");
1355                 if (priv->bad_synchronization)
1356                         DEBUG("Infeasible: bad synchronization ordering\n");
1357                 if (promises_expired())
1358                         DEBUG("Infeasible: promises expired\n");
1359         }
1360         return mo_graph->checkForCycles() || priv->failed_promise ||
1361                 priv->too_many_reads || priv->bad_synchronization ||
1362                 promises_expired();
1363 }
1364
1365 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1366 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1367         ModelAction *lastread = get_last_action(act->get_tid());
1368         lastread->process_rmw(act);
1369         if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1370                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1371                 mo_graph->commitChanges();
1372         }
1373         return lastread;
1374 }
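
/*
 * Illustrative sketch (editor's note, not part of the checker and not
 * written against its own test harness): a plain C++11 client whose behavior
 * the RMW edge above constrains.  Thread structure and names are assumptions
 * for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> counter(0);

int main()
{
        /* addRMWEdge() places each RMW immediately after the write it read
         * from in modification order, so the two increments cannot both
         * read the initial value: the final value is 2, never 1. */
        std::thread t1([] { counter.fetch_add(1, std::memory_order_relaxed); });
        std::thread t2([] { counter.fetch_add(1, std::memory_order_relaxed); });
        t1.join();
        t2.join();
        return 0;
}
#endif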
1375
1376 /**
1377  * Checks whether a thread has read from the same write too many times
1378  * without seeing the effects of a later write.
1379  *
1380  * Basic idea:
1381  * 1) there must be a different write that we could read from that would satisfy the modification order,
1382  * 2) we must have read from the same value in excess of maxreads times, and
1383  * 3) that other write must have been in the reads_from set for maxreads times.
1384  *
1385  * If so, we decide that the execution is no longer feasible.
1386  */
1387 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
1388         if (params.maxreads != 0) {
1389
1390                 if (curr->get_node()->get_read_from_size() <= 1)
1391                         return;
1392                 //Must make sure that the execution is currently feasible...  We could
1393                 //accidentally clear the infeasible state by rolling back
1394                 if (is_infeasible())
1395                         return;
1396                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1397                 int tid = id_to_int(curr->get_tid());
1398
1399                 /* Skip checks */
1400                 if ((int)thrd_lists->size() <= tid)
1401                         return;
1402                 action_list_t *list = &(*thrd_lists)[tid];
1403
1404                 action_list_t::reverse_iterator rit = list->rbegin();
1405                 /* Skip past curr */
1406                 for (; (*rit) != curr; rit++)
1407                         ;
1408                 /* go past curr now */
1409                 rit++;
1410
1411                 action_list_t::reverse_iterator ritcopy = rit;
1412                 //See if we have enough reads from the same value
1413                 int count = 0;
1414                 for (; count < params.maxreads; rit++,count++) {
1415                         if (rit==list->rend())
1416                                 return;
1417                         ModelAction *act = *rit;
1418                         if (!act->is_read())
1419                                 return;
1420
1421                         if (act->get_reads_from() != rf)
1422                                 return;
1423                         if (act->get_node()->get_read_from_size() <= 1)
1424                                 return;
1425                 }
1426                 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1427                         //Get write
1428                         const ModelAction * write = curr->get_node()->get_read_from_at(i);
1429
1430                         //Need a different write
1431                         if (write==rf)
1432                                 continue;
1433
1434                         /* Test to see whether this is a feasible write to read from */
1435                         mo_graph->startChanges();
1436                         r_modification_order(curr, write);
1437                         bool feasiblereadfrom = !is_infeasible();
1438                         mo_graph->rollbackChanges();
1439
1440                         if (!feasiblereadfrom)
1441                                 continue;
1442                         rit = ritcopy;
1443
1444                         bool feasiblewrite = true;
1445                         //now we need to see if this write works for everyone
1446
1447                         for (int loop = count; loop>0; loop--,rit++) {
1448                                 ModelAction *act=*rit;
1449                                 bool foundvalue = false;
1450                                 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1451                                         if (act->get_node()->get_read_from_at(j)==write) {
1452                                                 foundvalue = true;
1453                                                 break;
1454                                         }
1455                                 }
1456                                 if (!foundvalue) {
1457                                         feasiblewrite = false;
1458                                         break;
1459                                 }
1460                         }
1461                         if (feasiblewrite) {
1462                                 priv->too_many_reads = true;
1463                                 return;
1464                         }
1465                 }
1466         }
1467 }
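
/*
 * Illustrative sketch (editor's note, not part of the checker): the kind of
 * unbounded spin loop the maxreads bound is aimed at.  The thread layout and
 * variable names are assumptions for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> flag(0);

int main()
{
        std::thread reader([] {
                /* Re-reads the initial store of 'flag' indefinitely; once a
                 * different feasible write (the store of 1) has been passed
                 * over maxreads times, check_recency() sets too_many_reads
                 * and the execution is treated as infeasible. */
                while (flag.load(std::memory_order_relaxed) == 0)
                        ;
        });
        std::thread writer([] {
                flag.store(1, std::memory_order_relaxed);
        });
        reader.join();
        writer.join();
        return 0;
}
#endif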
1468
1469 /**
1470  * Updates the mo_graph with the constraints imposed from the current
1471  * read.
1472  *
1473  * Basic idea is the following: Go through each other thread and find
1474  * the latest action that happened before our read.  Two cases:
1475  *
1476  * (1) The action is a write => that write must either occur before
1477  * the write we read from or be the write we read from.
1478  *
1479  * (2) The action is a read => the write that that action read from
1480  * must occur before the write we read from or be the same write.
1481  *
1482  * @param curr The current action. Must be a read.
1483  * @param rf The action that curr reads from. Must be a write.
1484  * @return True if modification order edges were added; false otherwise
1485  */
1486 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1487 {
1488         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1489         unsigned int i;
1490         bool added = false;
1491         ASSERT(curr->is_read());
1492
1493         /* Last SC fence in the current thread */
1494         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1495
1496         /* Iterate over all threads */
1497         for (i = 0; i < thrd_lists->size(); i++) {
1498                 /* Last SC fence in thread i */
1499                 ModelAction *last_sc_fence_thread_local = NULL;
1500                 if (int_to_id((int)i) != curr->get_tid())
1501                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1502
1503                 /* Last SC fence in thread i, before last SC fence in current thread */
1504                 ModelAction *last_sc_fence_thread_before = NULL;
1505                 if (last_sc_fence_local)
1506                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1507
1508                 /* Iterate over actions in thread, starting from most recent */
1509                 action_list_t *list = &(*thrd_lists)[i];
1510                 action_list_t::reverse_iterator rit;
1511                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1512                         ModelAction *act = *rit;
1513
1514                         if (act->is_write() && act != rf && act != curr) {
1515                                 /* C++, Section 29.3 statement 5 */
1516                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1517                                                 *act < *last_sc_fence_thread_local) {
1518                                         mo_graph->addEdge(act, rf);
1519                                         added = true;
1520                                         break;
1521                                 }
1522                                 /* C++, Section 29.3 statement 4 */
1523                                 else if (act->is_seqcst() && last_sc_fence_local &&
1524                                                 *act < *last_sc_fence_local) {
1525                                         mo_graph->addEdge(act, rf);
1526                                         added = true;
1527                                         break;
1528                                 }
1529                                 /* C++, Section 29.3 statement 6 */
1530                                 else if (last_sc_fence_thread_before &&
1531                                                 *act < *last_sc_fence_thread_before) {
1532                                         mo_graph->addEdge(act, rf);
1533                                         added = true;
1534                                         break;
1535                                 }
1536                         }
1537
1538                         /*
1539                          * Include at most one act per-thread that "happens
1540                          * before" curr. Don't consider reflexively.
1541                          */
1542                         if (act->happens_before(curr) && act != curr) {
1543                                 if (act->is_write()) {
1544                                         if (rf != act) {
1545                                                 mo_graph->addEdge(act, rf);
1546                                                 added = true;
1547                                         }
1548                                 } else {
1549                                         const ModelAction *prevreadfrom = act->get_reads_from();
1550                                         //if the previous read is unresolved, keep going...
1551                                         if (prevreadfrom == NULL)
1552                                                 continue;
1553
1554                                         if (rf != prevreadfrom) {
1555                                                 mo_graph->addEdge(prevreadfrom, rf);
1556                                                 added = true;
1557                                         }
1558                                 }
1559                                 break;
1560                         }
1561                 }
1562         }
1563
1564         return added;
1565 }
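
/*
 * Illustrative sketch (editor's note, not part of the checker): case (2)
 * above is what enforces read-read coherence for a client like the
 * following; names are assumptions for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

int main()
{
        std::thread t1([] {
                x.store(1, std::memory_order_relaxed);          /* W1 */
                x.store(2, std::memory_order_relaxed);          /* W2 */
        });
        std::thread t2([] {
                int r1 = x.load(std::memory_order_relaxed);     /* earlier read */
                int r2 = x.load(std::memory_order_relaxed);     /* "curr" */
                /* If r1 reads W2 and r2 reads W1, case (2) adds the edge
                 * W2 --mo--> W1; together with W1 --mo--> W2 from the
                 * writer's program order this is a cycle, so the outcome
                 * r1 == 2 && r2 == 1 is ruled out. */
                (void)r1; (void)r2;
        });
        t1.join();
        t2.join();
        return 0;
}
#endif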
1566
1567 /** This method fixes up the modification order when we resolve a
1568  *  promise.  The basic problem is that actions that occur after the
1569  *  read curr could not properly add items to the modification order
1570  *  for our read.
1571  *
1572  *  So for each thread, we find the earliest item that happens after
1573  *  the read curr.  This is the item we have to fix up with additional
1574  *  constraints.  If that action is a write, we add an MO edge between
1575  *  the action rf and that action.  If the action is a read, we add an
1576  *  MO edge between the action rf and whatever the read accessed.
1577  *
1578  * @param curr is the read ModelAction that we are fixing up MO edges for.
1579  * @param rf is the write ModelAction that curr reads from.
1580  *
1581  */
1582 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1583 {
1584         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1585         unsigned int i;
1586         ASSERT(curr->is_read());
1587
1588         /* Iterate over all threads */
1589         for (i = 0; i < thrd_lists->size(); i++) {
1590                 /* Iterate over actions in thread, starting from most recent */
1591                 action_list_t *list = &(*thrd_lists)[i];
1592                 action_list_t::reverse_iterator rit;
1593                 ModelAction *lastact = NULL;
1594
1595                 /* Find the earliest action that happens after curr; consider curr itself only if it is an RMW */
1596                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1597                         ModelAction *act = *rit;
1598                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1599                                 lastact = act;
1600                         } else
1601                                 break;
1602                 }
1603
1604                 /* Include at most one act per-thread that "happens before" curr */
1605                 if (lastact != NULL) {
1606                         if (lastact==curr) {
1607                                 //Case 1: The resolved read is an RMW, and we need to make sure
1608                                 //that the write portion of the RMW is mod ordered after rf
1609
1610                                 mo_graph->addEdge(rf, lastact);
1611                         } else if (lastact->is_read()) {
1612                                 //Case 2: The resolved read is a normal read and the next
1613                                 //operation is a read, and we need to make sure the value read
1614                                 //is mod ordered after rf
1615
1616                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1617                                 if (postreadfrom != NULL&&rf != postreadfrom)
1618                                         mo_graph->addEdge(rf, postreadfrom);
1619                         } else {
1620                                 //Case 3: The resolved read is a normal read and the next
1621                                 //operation is a write, and we need to make sure that the
1622                                 //write is mod ordered after rf
1623                                 if (lastact!=rf)
1624                                         mo_graph->addEdge(rf, lastact);
1625                         }
1626                         break;
1627                 }
1628         }
1629 }
1630
1631 /**
1632  * Updates the mo_graph with the constraints imposed from the current write.
1633  *
1634  * Basic idea is the following: Go through each other thread and find
1635  * the latest action that happened before our write.  Two cases:
1636  *
1637  * (1) The action is a write => that write must occur before
1638  * the current write
1639  *
1640  * (2) The action is a read => the write that that action read from
1641  * must occur before the current write.
1642  *
1643  * This method also handles two other issues:
1644  *
1645  * (I) Sequential Consistency: Making sure that if the current write is
1646  * seq_cst, that it occurs after the previous seq_cst write.
1647  *
1648  * (II) Sending the write back to non-synchronizing reads.
1649  *
1650  * @param curr The current action. Must be a write.
1651  * @return True if modification order edges were added; false otherwise
1652  */
1653 bool ModelChecker::w_modification_order(ModelAction *curr)
1654 {
1655         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1656         unsigned int i;
1657         bool added = false;
1658         ASSERT(curr->is_write());
1659
1660         if (curr->is_seqcst()) {
1661                 /* A seq_cst write must appear after the last seq_cst write to the
1662                          same location in modification order. */
1663                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1664                 if (last_seq_cst != NULL) {
1665                         mo_graph->addEdge(last_seq_cst, curr);
1666                         added = true;
1667                 }
1668         }
1669
1670         /* Last SC fence in the current thread */
1671         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1672
1673         /* Iterate over all threads */
1674         for (i = 0; i < thrd_lists->size(); i++) {
1675                 /* Last SC fence in thread i, before last SC fence in current thread */
1676                 ModelAction *last_sc_fence_thread_before = NULL;
1677                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1678                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1679
1680                 /* Iterate over actions in thread, starting from most recent */
1681                 action_list_t *list = &(*thrd_lists)[i];
1682                 action_list_t::reverse_iterator rit;
1683                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1684                         ModelAction *act = *rit;
1685                         if (act == curr) {
1686                                 /*
1687                                  * 1) If RMW and it actually read from something, then we
1688                                  * already have all relevant edges, so just skip to next
1689                                  * thread.
1690                                  *
1691                                  * 2) If RMW and it didn't read from anything, we should add
1692                                  * whatever edges we can get to speed up convergence.
1693                                  *
1694                                  * 3) If normal write, we need to look at earlier actions, so
1695                                  * continue processing list.
1696                                  */
1697                                 if (curr->is_rmw()) {
1698                                         if (curr->get_reads_from()!=NULL)
1699                                                 break;
1700                                         else
1701                                                 continue;
1702                                 } else
1703                                         continue;
1704                         }
1705
1706                         /* C++, Section 29.3 statement 7 */
1707                         if (last_sc_fence_thread_before && act->is_write() &&
1708                                         *act < *last_sc_fence_thread_before) {
1709                                 mo_graph->addEdge(act, curr);
1710                                 added = true;
1711                                 break;
1712                         }
1713
1714                         /*
1715                          * Include at most one act per-thread that "happens
1716                          * before" curr
1717                          */
1718                         if (act->happens_before(curr)) {
1719                                 /*
1720                                  * Note: if act is RMW, just add edge:
1721                                  *   act --mo--> curr
1722                                  * The following edge should be handled elsewhere:
1723                                  *   readfrom(act) --mo--> act
1724                                  */
1725                                 if (act->is_write())
1726                                         mo_graph->addEdge(act, curr);
1727                                 else if (act->is_read()) {
1728                                         //if the previous read's reads-from is still unresolved (NULL), just keep going
1729                                         if (act->get_reads_from() == NULL)
1730                                                 continue;
1731                                         mo_graph->addEdge(act->get_reads_from(), curr);
1732                                 }
1733                                 added = true;
1734                                 break;
1735                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1736                                                      !act->same_thread(curr)) {
1737                                 /* We have an action that:
1738                                    (1) did not happen before us
1739                                    (2) is a read and we are a write
1740                                    (3) cannot synchronize with us
1741                                    (4) is in a different thread
1742                                    =>
1743                                    that read could potentially read from our write.  Note that
1744                                    these checks are overly conservative at this point, we'll
1745                                    do more checks before actually removing the
1746                                    pendingfuturevalue.
1747
1748                                  */
1749                                 if (thin_air_constraint_may_allow(curr, act)) {
1750                                         if (!is_infeasible() ||
1751                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1752                                                 struct PendingFutureValue pfv = {curr,act};
1753                                                 futurevalues->push_back(pfv);
1754                                         }
1755                                 }
1756                         }
1757                 }
1758         }
1759
1760         return added;
1761 }
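
/*
 * Illustrative sketch (editor's note, not part of the checker): case (2)
 * above enforces read-write coherence for a client like the following;
 * names are assumptions for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

int main()
{
        std::thread t1([] {
                x.store(1, std::memory_order_relaxed);          /* W1 */
        });
        std::thread t2([] {
                int r = x.load(std::memory_order_relaxed);      /* R */
                x.store(2, std::memory_order_relaxed);          /* curr (W2) */
                /* If R reads W1, then R happens before W2 (sequenced
                 * before), so the edge W1 --mo--> W2 is added: W2 can never
                 * be ordered before a write its own thread already saw. */
                (void)r;
        });
        t1.join();
        t2.join();
        return 0;
}
#endif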
1762
1763 /** Arbitrary reads from the future are not allowed.  Section 29.3
1764  * part 9 places some constraints.  This method checks one consequence of that
1765  * constraint.  Others require compiler support. */
1766 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1767         if (!writer->is_rmw())
1768                 return true;
1769
1770         if (!reader->is_rmw())
1771                 return true;
1772
1773         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1774                 if (search == reader)
1775                         return false;
1776                 if (search->get_tid() == reader->get_tid() &&
1777                                 search->happens_before(reader))
1778                         break;
1779         }
1780
1781         return true;
1782 }
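
/*
 * Illustrative sketch (editor's note, not part of the checker): the
 * reads-from cycle between two RMWs that the chain walk above rejects;
 * names are assumptions for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

int main()
{
        std::thread t1([] {
                x.fetch_add(1, std::memory_order_relaxed);      /* RMW A */
        });
        std::thread t2([] {
                x.fetch_add(1, std::memory_order_relaxed);      /* RMW B */
        });
        /* A reading from B while B reads from A would be a value "out of
         * thin air".  Walking A's reads-from chain finds B (and vice
         * versa), so that future value is disallowed and x always ends up
         * as 2. */
        t1.join();
        t2.join();
        return 0;
}
#endif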
1783
1784 /**
1785  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1786  * some constraints. This method checks the following constraint (others
1787  * require compiler support):
1788  *
1789  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1790  */
1791 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1792 {
1793         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1794         unsigned int i;
1795         /* Iterate over all threads */
1796         for (i = 0; i < thrd_lists->size(); i++) {
1797                 const ModelAction *write_after_read = NULL;
1798
1799                 /* Iterate over actions in thread, starting from most recent */
1800                 action_list_t *list = &(*thrd_lists)[i];
1801                 action_list_t::reverse_iterator rit;
1802                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1803                         ModelAction *act = *rit;
1804
1805                         if (!reader->happens_before(act))
1806                                 break;
1807                         else if (act->is_write())
1808                                 write_after_read = act;
1809                         else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1810                                 write_after_read = act->get_reads_from();
1811                         }
1812                 }
1813
1814                 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1815                         return false;
1816         }
1817         return true;
1818 }
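
/*
 * Illustrative sketch (editor's note, not part of the checker): the
 * X --hb--> Y --mo--> Z pattern checked above; names are assumptions for
 * the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

int main()
{
        std::thread t1([] {
                int r1 = x.load(std::memory_order_relaxed);     /* X */
                x.store(1, std::memory_order_relaxed);          /* Y */
                (void)r1;
        });
        std::thread t2([] {
                int r2 = x.load(std::memory_order_relaxed);
                x.store(2, std::memory_order_relaxed);          /* Z */
                /* If r2 reads Y, coherence gives Y --mo--> Z.  Since
                 * X --hb--> Y, mo_may_allow() refuses to send Z's value
                 * back to X, so r1 == 2 && r2 == 1 is impossible. */
                (void)r2;
        });
        t1.join();
        t2.join();
        return 0;
}
#endif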
1819
1820 /**
1821  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1822  * The ModelAction under consideration is expected to be taking part in
1823  * release/acquire synchronization as an object of the "reads from" relation.
1824  * Note that this can only provide release sequence support for RMW chains
1825  * which do not read from the future, as those actions cannot be traced until
1826  * their "promise" is fulfilled. Similarly, we may not even establish the
1827  * presence of a release sequence with certainty, as some modification order
1828  * constraints may be decided further in the future. Thus, this function
1829  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1830  * and a boolean representing certainty.
1831  *
1832  * @param rf The action that might be part of a release sequence. Must be a
1833  * write.
1834  * @param release_heads A pass-by-reference style return parameter. After
1835  * execution of this function, release_heads will contain the heads of all the
1836  * relevant release sequences, if any exist with certainty
1837  * @param pending A pass-by-reference style return parameter which is only used
1838  * when returning false (i.e., uncertain). Returns information regarding
1839  * an uncertain release sequence, including any write operations that might
1840  * break the sequence.
1841  * @return true, if the ModelChecker is certain that release_heads is complete;
1842  * false otherwise
1843  */
1844 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1845                 rel_heads_list_t *release_heads,
1846                 struct release_seq *pending) const
1847 {
1848         /* Only check for release sequences if there are no cycles */
1849         if (mo_graph->checkForCycles())
1850                 return false;
1851
1852         while (rf) {
1853                 ASSERT(rf->is_write());
1854
1855                 if (rf->is_release())
1856                         release_heads->push_back(rf);
1857                 else if (rf->get_last_fence_release())
1858                         release_heads->push_back(rf->get_last_fence_release());
1859                 if (!rf->is_rmw())
1860                         break; /* End of RMW chain */
1861
1862                 /** @todo Need to be smarter here...  In the linux lock
1863                  * example, this will run to the beginning of the program for
1864                  * every acquire. */
1865                 /** @todo The way to be smarter here is to keep going until 1
1866                  * thread has a release preceded by an acquire and you've seen
1867                  *       both. */
1868
1869                 /* acq_rel RMW is a sufficient stopping condition */
1870                 if (rf->is_acquire() && rf->is_release())
1871                         return true; /* complete */
1872
1873                 rf = rf->get_reads_from();
1874         };
1875         if (!rf) {
1876                 /* read from future: need to settle this later */
1877                 pending->rf = NULL;
1878                 return false; /* incomplete */
1879         }
1880
1881         if (rf->is_release())
1882                 return true; /* complete */
1883
1884         /* else relaxed write
1885          * - check for fence-release in the same thread (29.8, stmt. 3)
1886          * - check modification order for contiguous subsequence
1887          *   -> rf must be same thread as release */
1888
1889         const ModelAction *fence_release = rf->get_last_fence_release();
1890         /* Synchronize with a fence-release unconditionally; we don't need to
1891          * find any more "contiguous subsequence..." for it */
1892         if (fence_release)
1893                 release_heads->push_back(fence_release);
1894
1895         int tid = id_to_int(rf->get_tid());
1896         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1897         action_list_t *list = &(*thrd_lists)[tid];
1898         action_list_t::const_reverse_iterator rit;
1899
1900         /* Find rf in the thread list */
1901         rit = std::find(list->rbegin(), list->rend(), rf);
1902         ASSERT(rit != list->rend());
1903
1904         /* Find the last {write,fence}-release */
1905         for (; rit != list->rend(); rit++) {
1906                 if (fence_release && *(*rit) < *fence_release)
1907                         break;
1908                 if ((*rit)->is_release())
1909                         break;
1910         }
1911         if (rit == list->rend()) {
1912                 /* No write-release in this thread */
1913                 return true; /* complete */
1914         } else if (fence_release && *(*rit) < *fence_release) {
1915                 /* The fence-release is more recent (and so, "stronger") than
1916                  * the most recent write-release */
1917                 return true; /* complete */
1918         } /* else, need to establish contiguous release sequence */
1919         ModelAction *release = *rit;
1920
1921         ASSERT(rf->same_thread(release));
1922
1923         pending->writes.clear();
1924
1925         bool certain = true;
1926         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1927                 if (id_to_int(rf->get_tid()) == (int)i)
1928                         continue;
1929                 list = &(*thrd_lists)[i];
1930
1931                 /* Can we ensure no future writes from this thread may break
1932                  * the release seq? */
1933                 bool future_ordered = false;
1934
1935                 ModelAction *last = get_last_action(int_to_id(i));
1936                 Thread *th = get_thread(int_to_id(i));
1937                 if ((last && rf->happens_before(last)) ||
1938                                 !is_enabled(th) ||
1939                                 th->is_complete())
1940                         future_ordered = true;
1941
1942                 ASSERT(!th->is_model_thread() || future_ordered);
1943
1944                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1945                         const ModelAction *act = *rit;
1946                         /* Reach synchronization -> this thread is complete */
1947                         if (act->happens_before(release))
1948                                 break;
1949                         if (rf->happens_before(act)) {
1950                                 future_ordered = true;
1951                                 continue;
1952                         }
1953
1954                         /* Only non-RMW writes can break release sequences */
1955                         if (!act->is_write() || act->is_rmw())
1956                                 continue;
1957
1958                         /* Check modification order */
1959                         if (mo_graph->checkReachable(rf, act)) {
1960                                 /* rf --mo--> act */
1961                                 future_ordered = true;
1962                                 continue;
1963                         }
1964                         if (mo_graph->checkReachable(act, release))
1965                                 /* act --mo--> release */
1966                                 break;
1967                         if (mo_graph->checkReachable(release, act) &&
1968                                       mo_graph->checkReachable(act, rf)) {
1969                                 /* release --mo-> act --mo--> rf */
1970                                 return true; /* complete */
1971                         }
1972                         /* act may break release sequence */
1973                         pending->writes.push_back(act);
1974                         certain = false;
1975                 }
1976                 if (!future_ordered)
1977                         certain = false; /* This thread is uncertain */
1978         }
1979
1980         if (certain) {
1981                 release_heads->push_back(release);
1982                 pending->writes.clear();
1983         } else {
1984                 pending->release = release;
1985                 pending->rf = rf;
1986         }
1987         return certain;
1988 }
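
/*
 * Illustrative sketch (editor's note, not part of the checker): a release
 * sequence continued by an RMW in another thread, the situation the chain
 * walk above resolves; names are assumptions for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);
static int data;

int main()
{
        std::thread t1([] {
                data = 42;
                x.store(1, std::memory_order_release);          /* release head */
        });
        std::thread t2([] {
                x.fetch_add(1, std::memory_order_relaxed);      /* RMW extends the
                                                                    release sequence */
        });
        std::thread t3([] {
                if (x.load(std::memory_order_acquire) == 2) {
                        /* Reading 2 means the RMW read from the release
                         * head, so this acquire synchronizes with t1's
                         * store and reading 'data' is race-free. */
                        int r = data;
                        (void)r;
                }
        });
        t1.join(); t2.join(); t3.join();
        return 0;
}
#endif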
1989
1990 /**
1991  * An interface for getting the release sequence head(s) with which a
1992  * given ModelAction must synchronize. This function only returns a non-empty
1993  * result when it can locate a release sequence head with certainty. Otherwise,
1994  * it may mark the internal state of the ModelChecker so that it will handle
1995  * the release sequence at a later time, causing @a acquire to update its
1996  * synchronization at some later point in execution.
1997  *
1998  * @param acquire The 'acquire' action that may synchronize with a release
1999  * sequence
2000  * @param read The read action that may read from a release sequence; this may
2001  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2002  * when 'acquire' is a fence-acquire)
2003  * @param release_heads A pass-by-reference return parameter. Will be filled
2004  * with the head(s) of the release sequence(s), if they exists with certainty.
2005  * @see ModelChecker::release_seq_heads
2006  */
2007 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2008                 ModelAction *read, rel_heads_list_t *release_heads)
2009 {
2010         const ModelAction *rf = read->get_reads_from();
2011         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2012         sequence->acquire = acquire;
2013         sequence->read = read;
2014
2015         if (!release_seq_heads(rf, release_heads, sequence)) {
2016                 /* add act to 'lazy checking' list */
2017                 pending_rel_seqs->push_back(sequence);
2018         } else {
2019                 snapshot_free(sequence);
2020         }
2021 }
2022
2023 /**
2024  * Attempt to resolve all stashed operations that might synchronize with a
2025  * release sequence for a given location. This implements the "lazy" portion of
2026  * determining whether or not a release sequence was contiguous, since not all
2027  * modification order information is present at the time an action occurs.
2028  *
2029  * @param location The location/object that should be checked for release
2030  * sequence resolutions. A NULL value means to check all locations.
2031  * @param work_queue The work queue to which to add work items as they are
2032  * generated
2033  * @return True if any updates occurred (new synchronization, new mo_graph
2034  * edges)
2035  */
2036 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2037 {
2038         bool updated = false;
2039         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2040         while (it != pending_rel_seqs->end()) {
2041                 struct release_seq *pending = *it;
2042                 ModelAction *acquire = pending->acquire;
2043                 const ModelAction *read = pending->read;
2044
2045                 /* Only resolve sequences on the given location, if provided */
2046                 if (location && read->get_location() != location) {
2047                         it++;
2048                         continue;
2049                 }
2050
2051                 const ModelAction *rf = read->get_reads_from();
2052                 rel_heads_list_t release_heads;
2053                 bool complete;
2054                 complete = release_seq_heads(rf, &release_heads, pending);
2055                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2056                         if (!acquire->has_synchronized_with(release_heads[i])) {
2057                                 if (acquire->synchronize_with(release_heads[i]))
2058                                         updated = true;
2059                                 else
2060                                         set_bad_synchronization();
2061                         }
2062                 }
2063
2064                 if (updated) {
2065                         /* Re-check all pending release sequences */
2066                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2067                         /* Re-check read-acquire for mo_graph edges */
2068                         if (acquire->is_read())
2069                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2070
2071                         /* propagate synchronization to later actions */
2072                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2073                         for (; (*rit) != acquire; rit++) {
2074                                 ModelAction *propagate = *rit;
2075                                 if (acquire->happens_before(propagate)) {
2076                                         propagate->synchronize_with(acquire);
2077                                         /* Re-check 'propagate' for mo_graph edges */
2078                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2079                                 }
2080                         }
2081                 }
2082                 if (complete) {
2083                         it = pending_rel_seqs->erase(it);
2084                         snapshot_free(pending);
2085                 } else {
2086                         it++;
2087                 }
2088         }
2089
2090         // If we resolved promises or synchronization, see if we have realized a data race.
2091         checkDataRaces();
2092
2093         return updated;
2094 }
2095
2096 /**
2097  * Performs various bookkeeping operations for the current ModelAction. For
2098  * instance, adds action to the per-object, per-thread action vector and to the
2099  * action trace list of all thread actions.
2100  *
2101  * @param act is the ModelAction to add.
2102  */
2103 void ModelChecker::add_action_to_lists(ModelAction *act)
2104 {
2105         int tid = id_to_int(act->get_tid());
2106         action_trace->push_back(act);
2107
2108         get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
2109
2110         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2111         if (tid >= (int)vec->size())
2112                 vec->resize(priv->next_thread_id);
2113         (*vec)[tid].push_back(act);
2114
2115         if ((int)thrd_last_action->size() <= tid)
2116                 thrd_last_action->resize(get_num_threads());
2117         (*thrd_last_action)[tid] = act;
2118
2119         if (act->is_fence() && act->is_release()) {
2120                 if ((int)thrd_last_fence_release->size() <= tid)
2121                         thrd_last_fence_release->resize(get_num_threads());
2122                 (*thrd_last_fence_release)[tid] = act;
2123         }
2124
2125         if (act->is_wait()) {
2126                 void *mutex_loc=(void *) act->get_value();
2127                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2128
2129                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2130                 if (tid >= (int)vec->size())
2131                         vec->resize(priv->next_thread_id);
2132                 (*vec)[tid].push_back(act);
2133         }
2134 }
2135
2136 /**
2137  * @brief Get the last action performed by a particular Thread
2138  * @param tid The thread ID of the Thread in question
2139  * @return The last action in the thread
2140  */
2141 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2142 {
2143         int threadid = id_to_int(tid);
2144         if (threadid < (int)thrd_last_action->size())
2145                 return (*thrd_last_action)[id_to_int(tid)];
2146         else
2147                 return NULL;
2148 }
2149
2150 /**
2151  * @brief Get the last fence release performed by a particular Thread
2152  * @param tid The thread ID of the Thread in question
2153  * @return The last fence release in the thread, if one exists; NULL otherwise
2154  */
2155 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2156 {
2157         int threadid = id_to_int(tid);
2158         if (threadid < (int)thrd_last_fence_release->size())
2159                 return (*thrd_last_fence_release)[id_to_int(tid)];
2160         else
2161                 return NULL;
2162 }
2163
2164 /**
2165  * Gets the last memory_order_seq_cst write (in the total global sequence)
2166  * performed on a particular object (i.e., memory location), not including the
2167  * current action.
2168  * @param curr The current ModelAction; also denotes the object location to
2169  * check
2170  * @return The last seq_cst write
2171  */
2172 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2173 {
2174         void *location = curr->get_location();
2175         action_list_t *list = get_safe_ptr_action(obj_map, location);
2176         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2177         action_list_t::reverse_iterator rit;
2178         for (rit = list->rbegin(); rit != list->rend(); rit++)
2179                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2180                         return *rit;
2181         return NULL;
2182 }
2183
2184 /**
2185  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2186  * performed in a particular thread, prior to a particular fence.
2187  * @param tid The ID of the thread to check
2188  * @param before_fence The fence from which to begin the search; if NULL, then
2189  * search for the most recent fence in the thread.
2190  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2191  */
2192 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2193 {
2194         /* All fences should have NULL location */
2195         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2196         action_list_t::reverse_iterator rit = list->rbegin();
2197
2198         if (before_fence) {
2199                 for (; rit != list->rend(); rit++)
2200                         if (*rit == before_fence)
2201                                 break;
2202
2203                 ASSERT(*rit == before_fence);
2204                 rit++;
2205         }
2206
2207         for (; rit != list->rend(); rit++)
2208                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2209                         return *rit;
2210         return NULL;
2211 }
2212
2213 /**
2214  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2215  * location). This function identifies the mutex according to the current
2216  * action, which is presumed to operate on the same mutex.
2217  * @param curr The current ModelAction; also denotes the object location to
2218  * check
2219  * @return The last unlock operation
2220  */
2221 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2222 {
2223         void *location = curr->get_location();
2224         action_list_t *list = get_safe_ptr_action(obj_map, location);
2225         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2226         action_list_t::reverse_iterator rit;
2227         for (rit = list->rbegin(); rit != list->rend(); rit++)
2228                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2229                         return *rit;
2230         return NULL;
2231 }
2232
2233 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2234 {
2235         ModelAction *parent = get_last_action(tid);
2236         if (!parent)
2237                 parent = get_thread(tid)->get_creation();
2238         return parent;
2239 }
2240
2241 /**
2242  * Returns the clock vector for a given thread.
2243  * @param tid The thread whose clock vector we want
2244  * @return Desired clock vector
2245  */
2246 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2247 {
2248         return get_parent_action(tid)->get_cv();
2249 }
2250
2251 /**
2252  * Resolve a set of Promises with a current write. The set is provided in the
2253  * Node corresponding to @a write.
2254  * @param write The ModelAction that is fulfilling Promises
2255  * @return True if promises were resolved; false otherwise
2256  */
2257 bool ModelChecker::resolve_promises(ModelAction *write)
2258 {
2259         bool resolved = false;
2260         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2261
2262         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2263                 Promise *promise = (*promises)[promise_index];
2264                 if (write->get_node()->get_promise(i)) {
2265                         ModelAction *read = promise->get_action();
2266                         if (read->is_rmw()) {
2267                                 mo_graph->addRMWEdge(write, read);
2268                         }
2269                         read_from(read, write);
2270                         //First fix up the modification order for actions that happened
2271                         //before the read
2272                         r_modification_order(read, write);
2273                         //Next fix up the modification order for actions that happened
2274                         //after the read.
2275                         post_r_modification_order(read, write);
2276                         //Make sure the promise's value matches the write's value
2277                         ASSERT(promise->get_value() == write->get_value());
2278                         delete(promise);
2279
2280                         promises->erase(promises->begin() + promise_index);
2281                         threads_to_check.push_back(read->get_tid());
2282
2283                         resolved = true;
2284                 } else
2285                         promise_index++;
2286         }
2287
2288         //Check whether reading these writes has made threads unable to
2289         //resolve promises
2290
2291         for(unsigned int i=0;i<threads_to_check.size();i++)
2292                 mo_check_promises(threads_to_check[i], write);
2293
2294         return resolved;
2295 }
2296
2297 /**
2298  * Compute the set of promises that could potentially be satisfied by this
2299  * action. Note that the set computation actually appears in the Node, not in
2300  * ModelChecker.
2301  * @param curr The ModelAction that may satisfy promises
2302  */
2303 void ModelChecker::compute_promises(ModelAction *curr)
2304 {
2305         for (unsigned int i = 0; i < promises->size(); i++) {
2306                 Promise *promise = (*promises)[i];
2307                 const ModelAction *act = promise->get_action();
2308                 if (!act->happens_before(curr) &&
2309                                 act->is_read() &&
2310                                 !act->could_synchronize_with(curr) &&
2311                                 !act->same_thread(curr) &&
2312                                 act->get_location() == curr->get_location() &&
2313                                 promise->get_value() == curr->get_value()) {
2314                         curr->get_node()->set_promise(i, act->is_rmw());
2315                 }
2316         }
2317 }
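
/*
 * Illustrative sketch (editor's note, not part of the checker): a "load
 * buffering" client in which a read may be satisfied by a write that has
 * not executed yet, which is exactly what a Promise records; names are
 * assumptions for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0), y(0);

int main()
{
        std::thread t1([] {
                int r1 = x.load(std::memory_order_relaxed);     /* may promise 1 */
                y.store(r1, std::memory_order_relaxed);
        });
        std::thread t2([] {
                int r2 = y.load(std::memory_order_relaxed);
                x.store(1, std::memory_order_relaxed);          /* candidate write:
                                                                    same location and
                                                                    value as the promise */
                (void)r2;
        });
        /* When the checker explores the branch where t1's load returns the
         * future value 1, it records a Promise; compute_promises() marks
         * t2's store of 1 as able to satisfy it, and resolve_promises()
         * later fixes up reads-from and modification order. */
        t1.join();
        t2.join();
        return 0;
}
#endif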
2318
2319 /** Checks promises in response to change in ClockVector Threads. */
2320 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2321 {
2322         for (unsigned int i = 0; i < promises->size(); i++) {
2323                 Promise *promise = (*promises)[i];
2324                 const ModelAction *act = promise->get_action();
2325                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2326                                 merge_cv->synchronized_since(act)) {
2327                         if (promise->increment_threads(tid)) {
2328                                 //Promise has failed
2329                                 priv->failed_promise = true;
2330                                 return;
2331                         }
2332                 }
2333         }
2334 }
2335
2336 void ModelChecker::check_promises_thread_disabled() {
2337         for (unsigned int i = 0; i < promises->size(); i++) {
2338                 Promise *promise = (*promises)[i];
2339                 if (promise->check_promise()) {
2340                         priv->failed_promise = true;
2341                         return;
2342                 }
2343         }
2344 }
2345
2346 /** Checks promises in response to addition to modification order for threads.
2347  * Definitions:
2348  * pthread is the thread that performed the read that created the promise
2349  *
2350  * pread is the read that created the promise
2351  *
2352  * pwrite is either the first write to the same location as pread by
2353  * pthread that is sequenced after pread, or the value read by the
2354  * first read to the same location as pread by pthread that is
2355  * sequenced after pread.
2356  *
2357  *      1. If tid=pthread, then we check what other threads are reachable
2358  * through the modification order starting with pwrite.  Those threads cannot
2359  * perform a write that will resolve the promise due to modification
2360  * order constraints.
2361  *
2362  * 2. If the tid is not pthread, we check whether pwrite can reach the
2363  * action write through the modification order.  If so, that thread
2364  * cannot perform a future write that will resolve the promise due to
2365  * modification order constraints.
2366  *
2367  *      @param tid The thread that either read from the model action
2368  *      write, or actually did the model action write.
2369  *
2370  *      @param write The ModelAction representing the relevant write.
2371  */
2372
2373 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2374         void * location = write->get_location();
2375         for (unsigned int i = 0; i < promises->size(); i++) {
2376                 Promise *promise = (*promises)[i];
2377                 const ModelAction *act = promise->get_action();
2378
2379                 //Is this promise on the same location?
2380                 if ( act->get_location() != location )
2381                         continue;
2382
2383                 //same thread as the promise
2384                 if ( act->get_tid()==tid ) {
2385
2386                         //do we have a pwrite for the promise, if not, set it
2387                         if (promise->get_write() == NULL ) {
2388                                 promise->set_write(write);
2389                                 //The pwrite cannot happen before the promise
2390                                 if (write->happens_before(act) && (write != act)) {
2391                                         priv->failed_promise = true;
2392                                         return;
2393                                 }
2394                         }
2395                         if (mo_graph->checkPromise(write, promise)) {
2396                                 priv->failed_promise = true;
2397                                 return;
2398                         }
2399                 }
2400
2401                 //Don't do any lookups twice for the same thread
2402                 if (promise->has_sync_thread(tid))
2403                         continue;
2404
2405                 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2406                         if (promise->increment_threads(tid)) {
2407                                 priv->failed_promise = true;
2408                                 return;
2409                         }
2410                 }
2411         }
2412 }
2413
2414 /**
2415  * Compute the set of writes that may break the current pending release
2416  * sequence. This information is extracted from previous release sequence
2417  * calculations.
2418  *
2419  * @param curr The current ModelAction. Must be a release sequence fixup
2420  * action.
2421  */
2422 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2423 {
2424         if (pending_rel_seqs->empty())
2425                 return;
2426
2427         struct release_seq *pending = pending_rel_seqs->back();
2428         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2429                 const ModelAction *write = pending->writes[i];
2430                 curr->get_node()->add_relseq_break(write);
2431         }
2432
2433         /* NULL means don't break the sequence; just synchronize */
2434         curr->get_node()->add_relseq_break(NULL);
2435 }
2436
2437 /**
2438  * Build up an initial set of all past writes that this 'read' action may read
2439  * from. This set is determined by the clock vector's "happens before"
2440  * relationship.
2441  * @param curr is the current ModelAction that we are exploring; it must be a
2442  * 'read' operation.
2443  */
2444 void ModelChecker::build_reads_from_past(ModelAction *curr)
2445 {
2446         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2447         unsigned int i;
2448         ASSERT(curr->is_read());
2449
2450         ModelAction *last_sc_write = NULL;
2451
2452         /* Track whether this object has been initialized */
2453         bool initialized = false;
2454
2455         if (curr->is_seqcst()) {
2456                 last_sc_write = get_last_seq_cst_write(curr);
2457                 /* We have to at least see the last sequentially consistent write,
2458                          so we are initialized. */
2459                 if (last_sc_write != NULL)
2460                         initialized = true;
2461         }
2462
2463         /* Iterate over all threads */
2464         for (i = 0; i < thrd_lists->size(); i++) {
2465                 /* Iterate over actions in thread, starting from most recent */
2466                 action_list_t *list = &(*thrd_lists)[i];
2467                 action_list_t::reverse_iterator rit;
2468                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2469                         ModelAction *act = *rit;
2470
2471                         /* Only consider 'write' actions */
2472                         if (!act->is_write() || act == curr)
2473                                 continue;
2474
2475                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2476                         bool allow_read = true;
2477
2478                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2479                                 allow_read = false;
2480                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2481                                 allow_read = false;
2482
2483                         if (allow_read) {
2484                                 DEBUG("Adding action to may_read_from:\n");
2485                                 if (DBG_ENABLED()) {
2486                                         act->print();
2487                                         curr->print();
2488                                 }
2489                                 curr->get_node()->add_read_from(act);
2490                         }
2491
2492                         /* Include at most one act per-thread that "happens before" curr */
2493                         if (act->happens_before(curr)) {
2494                                 initialized = true;
2495                                 break;
2496                         }
2497                 }
2498         }
2499
2500         if (!initialized)
2501                 assert_bug("May read from uninitialized atomic");
2502
2503         if (DBG_ENABLED() || !initialized) {
2504                 model_print("Reached read action:\n");
2505                 curr->print();
2506                 model_print("Printing may_read_from\n");
2507                 curr->get_node()->print_may_read_from();
2508                 model_print("End printing may_read_from\n");
2509         }
2510 }
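
/*
 * Illustrative sketch (editor's note, not part of the checker): what the
 * may_read_from set looks like for a simple client; names are assumptions
 * for the example only.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);   /* the initializing store is itself a write
                                   the load below may read from */

int main()
{
        std::thread t1([] {
                x.store(1, std::memory_order_relaxed);
        });
        std::thread t2([] {
                /* may_read_from is built from every write on 'x' that is
                 * not ordered after this load by happens-before: here the
                 * initialization and t1's store, so r may be 0 or 1.  Had
                 * 'x' never been written at all, the "uninitialized atomic"
                 * bug above would be reported instead. */
                int r = x.load(std::memory_order_relaxed);
                (void)r;
        });
        t1.join();
        t2.join();
        return 0;
}
#endif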
2511
2512 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write) {
2513         while (true) {
2514                 Node *prevnode = write->get_node()->get_parent();
2515
2516                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2517                 if (write->is_release() && thread_sleep)
2518                         return true;
2519                 if (!write->is_rmw()) {
2520                         return false;
2521                 }
2522                 if (write->get_reads_from() == NULL)
2523                         return true;
2524                 write = write->get_reads_from();
2525         }
2526 }
2527
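/**
 * @brief Print a complete action trace
 *
 * Prints each action in @a list on its own line, followed by a simple
 * XOR/shift hash of the whole trace that serves as a quick fingerprint for
 * comparing executions.
 * @param list The action trace to print
 * @param exec_num Optional execution number used to label the output
 */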
2528 static void print_list(action_list_t *list, int exec_num = -1)
2529 {
2530         action_list_t::iterator it;
2531
2532         model_print("---------------------------------------------------------------------\n");
2533         if (exec_num >= 0)
2534                 model_print("Execution %d:\n", exec_num);
2535
2536         unsigned int hash = 0;
2537
2538         for (it = list->begin(); it != list->end(); it++) {
2539                 (*it)->print();
2540                 hash = hash ^ (hash << 3) ^ ((*it)->hash());
2541         }
2542         model_print("HASH %u\n", hash);
2543         model_print("---------------------------------------------------------------------\n");
2544 }
2545
2546 #if SUPPORT_MOD_ORDER_DUMP
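/**
 * @brief Dump the current execution as a Graphviz "dot" graph
 *
 * Writes "<filename>.dot" containing the modification-order nodes from the
 * CycleGraph, a red "rf" edge from each read to the write it reads from, and
 * blue "sb" edges linking consecutive actions of the same thread. The output
 * can then be rendered with Graphviz, e.g.
 * "dot -Tpdf graph0001.dot -o graph0001.pdf" (illustrative command line).
 * @param filename Base name for the output file (".dot" is appended)
 */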
2547 void ModelChecker::dumpGraph(char *filename) {
2548         char buffer[200];
2549         sprintf(buffer, "%s.dot", filename);
2550         FILE *file = fopen(buffer, "w");
2551         fprintf(file, "digraph %s {\n", filename);
2552         mo_graph->dumpNodes(file);
2553         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2554
2555         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2556                 ModelAction *action = *it;
2557                 if (action->is_read()) {
2558                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2559                         if (action->get_reads_from() != NULL)
2560                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2561                 }
2562                 if (thread_array[action->get_tid()] != NULL) {
2563                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2564                 }
2565
2566                 thread_array[action->get_tid()] = action;
2567         }
2568         fprintf(file, "}\n");
2569         model_free(thread_array);
2570         fclose(file);
2571 }
2572 #endif
2573
2574 /** @brief Prints an execution trace summary. */
2575 void ModelChecker::print_summary() const
2576 {
2577 #if SUPPORT_MOD_ORDER_DUMP
2578         scheduler->print();
2579         char buffername[100];
2580         sprintf(buffername, "exec%04u", stats.num_total);
2581         mo_graph->dumpGraphToFile(buffername);
2582         sprintf(buffername, "graph%04u", stats.num_total);
2583         dumpGraph(buffername);
2584 #endif
2585
2586         if (!isfeasibleprefix())
2587                 model_print("INFEASIBLE EXECUTION!\n");
2588         print_list(action_trace, stats.num_total);
2589         model_print("\n");
2590 }
2591
2592 /**
2593  * Add a Thread to the system for the first time. Should only be called once
2594  * per thread.
2595  * @param t The Thread to add
2596  */
2597 void ModelChecker::add_thread(Thread *t)
2598 {
2599         thread_map->put(id_to_int(t->get_id()), t);
2600         scheduler->add_thread(t);
2601 }
2602
2603 /**
2604  * Removes a thread from the scheduler.
2605  * @param t The Thread to remove
2606  */
2607 void ModelChecker::remove_thread(Thread *t)
2608 {
2609         scheduler->remove_thread(t);
2610 }
2611
2612 /**
2613  * @brief Get a Thread reference by its ID
2614  * @param tid The Thread's ID
2615  * @return A Thread reference
2616  */
2617 Thread * ModelChecker::get_thread(thread_id_t tid) const
2618 {
2619         return thread_map->get(id_to_int(tid));
2620 }
2621
2622 /**
2623  * @brief Get a reference to the Thread in which a ModelAction was executed
2624  * @param act The ModelAction
2625  * @return A Thread reference
2626  */
2627 Thread * ModelChecker::get_thread(ModelAction *act) const
2628 {
2629         return get_thread(act->get_tid());
2630 }
2631
2632 /**
2633  * @brief Check if a Thread is currently enabled
2634  * @param t The Thread to check
2635  * @return True if the Thread is currently enabled
2636  */
2637 bool ModelChecker::is_enabled(Thread *t) const
2638 {
2639         return scheduler->is_enabled(t);
2640 }
2641
2642 /**
2643  * @brief Check if a Thread is currently enabled
2644  * @param tid The ID of the Thread to check
2645  * @return True if the Thread is currently enabled
2646  */
2647 bool ModelChecker::is_enabled(thread_id_t tid) const
2648 {
2649         return scheduler->is_enabled(tid);
2650 }
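
/*
 * Illustrative sketch (not part of the checker): the accessors above can be
 * composed, e.g. to ask whether the thread that performed a recorded action
 * is still runnable. The variable names are ours; only functions already
 * visible in this file are used.
 *
 *     ModelAction *act = ...;                 // some recorded action
 *     Thread *thr = model->get_thread(act);   // same as get_thread(act->get_tid())
 *     if (model->is_enabled(thr))
 *             model_print("Thread %d is enabled\n", id_to_int(thr->get_id()));
 */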
2651
2652 /**
2653  * Switch from a user-context to the "master thread" context (a.k.a. system
2654  * context). This switch is made with the intention of exploring a particular
2655  * model-checking action (described by a ModelAction object). Must be called
2656  * from a user-thread context.
2657  *
2658  * @param act The current action that will be explored. May be NULL only if
2659  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2660  * ModelChecker::has_asserted).
2661  * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2662  */
2663 int ModelChecker::switch_to_master(ModelAction *act)
2664 {
2665         DBG();
2666         Thread *old = thread_current();
2667         set_current_action(act);
2668         old->set_state(THREAD_READY);
2669         return Thread::swap(old, &system_context);
2670 }
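
/*
 * Illustrative sketch only: a user-context operation hands its next step to
 * the checker roughly like this. The ATOMIC_READ action type and this exact
 * ModelAction constructor call are assumptions for the example, not something
 * this file defines.
 *
 *     // In a user thread, e.g. inside an atomic-load wrapper:
 *     ModelAction *act = new ModelAction(ATOMIC_READ,
 *                     std::memory_order_acquire, obj, VALUE_NONE);
 *     model->switch_to_master(act);   // yields to the scheduler; returns when rescheduled
 */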
2671
2672 /**
2673  * Takes the next step in the execution, if possible.
2674  * @return True if a step was taken; false otherwise.
2675  */
2676 bool ModelChecker::take_step() {
2677         if (has_asserted())
2678                 return false;
2679
2680         Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2681         if (curr) {
2682                 if (curr->get_state() == THREAD_READY) {
2683                         ASSERT(priv->current_action);
2684
2685                         priv->nextThread = check_current_action(priv->current_action);
2686                         priv->current_action = NULL;
2687
2688                         if (curr->is_blocked() || curr->is_complete())
2689                                 scheduler->remove_thread(curr);
2690                 } else {
2691                         ASSERT(false);
2692                 }
2693         }
2694         Thread *next = scheduler->next_thread(priv->nextThread);
2695
2696         /* Infeasible -> don't take any more steps */
2697         if (is_infeasible())
2698                 return false;
2699         else if (isfeasibleprefix() && have_bug_reports()) {
2700                 set_assert();
2701                 return false;
2702         }
2703
2704         if (params.bound != 0) {
2705                 if (priv->used_sequence_numbers > params.bound) {
2706                         return false;
2707                 }
2708         }
2709
2710         DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2711                         next ? id_to_int(next->get_id()) : -1);
2712
2713         /*
2714          * Launch end-of-execution release sequence fixups only when there are:
2715          *
2716          * (1) no more user threads to run (or when execution replay chooses
2717          *     the 'model_thread')
2718          * (2) pending release sequences
2719          * (3) pending assertions (i.e., data races)
2720          * (4) no pending promises
2721          */
2722         if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2723                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2724                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2725                                 pending_rel_seqs->size());
2726                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2727                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2728                                 model_thread);
2729                 set_current_action(fixup);
2730                 return true;
2731         }
2732
2733         /* next == NULL -> don't take any more steps */
2734         if (!next)
2735                 return false;
2736
2737         next->set_state(THREAD_RUNNING);
2738
2739         if (next->get_pending() != NULL) {
2740                 /* restart a pending action */
2741                 set_current_action(next->get_pending());
2742                 next->set_pending(NULL);
2743                 next->set_state(THREAD_READY);
2744                 return true;
2745         }
2746
2747         /* Return false only if swap fails with an error */
2748         return (Thread::swap(&system_context, next) == 0);
2749 }
2750
2751 /** Wrapper to run the user's main function, with appropriate arguments */
2752 void user_main_wrapper(void *)
2753 {
2754         user_main(model->params.argc, model->params.argv);
2755 }
2756
2757 /** @brief Run ModelChecker for the user program */
2758 void ModelChecker::run()
2759 {
2760         do {
2761                 thrd_t user_thread;
2762
2763                 /* Start user program */
2764                 add_thread(new Thread(&user_thread, &user_main_wrapper, NULL));
2765
2766                 /* Wait for all threads to complete */
2767                 while (take_step());
2768         } while (next_execution());
2769
2770         print_stats();
2771 }
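
/*
 * Illustrative sketch only: the driver that owns the global 'model' object
 * lives outside this file. A minimal driver, assuming a model_params value
 * filled in elsewhere (e.g. from the command line), would look roughly like:
 *
 *     struct model_params params;   // populated elsewhere
 *     model = new ModelChecker(params);
 *     model->run();                 // explores executions until exhaustion or a bug
 */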