model: add read-acquire/fence-release support
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4
5 #include "model.h"
6 #include "action.h"
7 #include "nodestack.h"
8 #include "schedule.h"
9 #include "snapshot-interface.h"
10 #include "common.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
13 #include "promise.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "output.h"
17
18 #define INITIAL_THREAD_ID       0
19
20 ModelChecker *model;
21
22 struct bug_message {
23         bug_message(const char *str) {
24                 const char *fmt = "  [BUG] %s\n";
25                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
26                 sprintf(msg, fmt, str);
27         }
28         ~bug_message() { if (msg) snapshot_free(msg); }
29
30         char *msg;
31         void print() { model_print("%s", msg); }
32
33         SNAPSHOTALLOC
34 };
35
36 /**
37  * Structure for holding small ModelChecker members that should be snapshotted
38  */
39 struct model_snapshot_members {
40         model_snapshot_members() :
41                 current_action(NULL),
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 nextThread(NULL),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         ModelAction *current_action;
62         unsigned int next_thread_id;
63         modelclock_t used_sequence_numbers;
64         Thread *nextThread;
65         ModelAction *next_backtrack;
66         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
67         struct execution_stats stats;
68         bool failed_promise;
69         bool too_many_reads;
70         /** @brief Incorrectly-ordered synchronization was made */
71         bool bad_synchronization;
72         bool asserted;
73
74         SNAPSHOTALLOC
75 };
76
77 /** @brief Constructor */
78 ModelChecker::ModelChecker(struct model_params params) :
79         /* Initialize default scheduler */
80         params(params),
81         scheduler(new Scheduler()),
82         diverge(NULL),
83         earliest_diverge(NULL),
84         action_trace(new action_list_t()),
85         thread_map(new HashTable<int, Thread *, int>()),
86         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
90         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
91         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
92         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
93         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
94         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
95         node_stack(new NodeStack()),
96         priv(new struct model_snapshot_members()),
97         mo_graph(new CycleGraph())
98 {
99         /* Initialize a model-checker thread, for special ModelActions */
100         model_thread = new Thread(get_next_id());
101         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
102 }
103
104 /** @brief Destructor */
105 ModelChecker::~ModelChecker()
106 {
107         for (unsigned int i = 0; i < get_num_threads(); i++)
108                 delete thread_map->get(i);
109         delete thread_map;
110
111         delete obj_thrd_map;
112         delete obj_map;
113         delete lock_waiters_map;
114         delete condvar_waiters_map;
115         delete action_trace;
116
117         for (unsigned int i = 0; i < promises->size(); i++)
118                 delete (*promises)[i];
119         delete promises;
120
121         delete pending_rel_seqs;
122
123         delete thrd_last_action;
124         delete thrd_last_fence_release;
125         delete node_stack;
126         delete scheduler;
127         delete mo_graph;
128         delete priv;
129 }
130
131 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
132         action_list_t * tmp=hash->get(ptr);
133         if (tmp==NULL) {
134                 tmp=new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
141         std::vector<action_list_t> * tmp=hash->get(ptr);
142         if (tmp==NULL) {
143                 tmp=new std::vector<action_list_t>();
144                 hash->put(ptr, tmp);
145         }
146         return tmp;
147 }
148
149 /**
150  * Restores the user program to its initial state and resets all model-checker data
151  * structures.
152  */
153 void ModelChecker::reset_to_initial_state()
154 {
155         DEBUG("+++ Resetting to initial state +++\n");
156         node_stack->reset_execution();
157
158         /* Print all model-checker output before rollback */
159         fflush(model_out);
160
161         snapshotObject->backTrackBeforeStep(0);
162 }
163
164 /** @return a thread ID for a new Thread */
165 thread_id_t ModelChecker::get_next_id()
166 {
167         return priv->next_thread_id++;
168 }
169
170 /** @return the number of user threads created during this execution */
171 unsigned int ModelChecker::get_num_threads() const
172 {
173         return priv->next_thread_id;
174 }
175
176 /** @return The currently executing Thread. */
177 Thread * ModelChecker::get_current_thread() const
178 {
179         return scheduler->get_current_thread();
180 }
181
182 /** @return a sequence number for a new ModelAction */
183 modelclock_t ModelChecker::get_next_seq_num()
184 {
185         return ++priv->used_sequence_numbers;
186 }
187
188 Node * ModelChecker::get_curr_node() const
189 {
190         return node_stack->get_head();
191 }
192
193 /**
194  * @brief Choose the next thread to execute.
195  *
196  * This function chooses the next thread that should execute. It can force the
197  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
198  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
199  * The model-checker may have no preference regarding the next thread (i.e.,
200  * when exploring a new execution ordering), in which case this will return
201  * NULL.
202  * @param curr The current ModelAction. This action might guide the choice of
203  * next thread.
204  * @return The next thread to run. If the model-checker has no preference, NULL.
205  */
206 Thread * ModelChecker::get_next_thread(ModelAction *curr)
207 {
208         thread_id_t tid;
209
210         if (curr!=NULL) {
211                 /* Do not split atomic actions. */
212                 if (curr->is_rmwr())
213                         return thread_current();
214                 /* The THREAD_CREATE action points to the created Thread */
215                 else if (curr->get_type() == THREAD_CREATE)
216                         return (Thread *)curr->get_location();
217         }
218
219         /* Have we completed exploring the preselected path? */
220         if (diverge == NULL)
221                 return NULL;
222
223         /* Else, we are trying to replay an execution */
224         ModelAction *next = node_stack->get_next()->get_action();
225
226         if (next == diverge) {
227                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
228                         earliest_diverge=diverge;
229
230                 Node *nextnode = next->get_node();
231                 Node *prevnode = nextnode->get_parent();
232                 scheduler->update_sleep_set(prevnode);
233
234                 /* Reached divergence point */
235                 if (nextnode->increment_misc()) {
236                         /* The next node will try to satisfy a different misc_index value. */
237                         tid = next->get_tid();
238                         node_stack->pop_restofstack(2);
239                 } else if (nextnode->increment_promise()) {
240                         /* The next node will try to satisfy a different set of promises. */
241                         tid = next->get_tid();
242                         node_stack->pop_restofstack(2);
243                 } else if (nextnode->increment_read_from()) {
244                         /* The next node will read from a different value. */
245                         tid = next->get_tid();
246                         node_stack->pop_restofstack(2);
247                 } else if (nextnode->increment_future_value()) {
248                         /* The next node will try to read from a different future value. */
249                         tid = next->get_tid();
250                         node_stack->pop_restofstack(2);
251                 } else if (nextnode->increment_relseq_break()) {
252                         /* The next node will try to resolve a release sequence differently */
253                         tid = next->get_tid();
254                         node_stack->pop_restofstack(2);
255                 } else {
256                         /* Make a different thread execute for next step */
257                         scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
258                         tid = prevnode->get_next_backtrack();
259                         /* Make sure the backtracked thread isn't sleeping. */
260                         node_stack->pop_restofstack(1);
261                         if (diverge==earliest_diverge) {
262                                 earliest_diverge=prevnode->get_action();
263                         }
264                 }
265                 /* The correct sleep set is in the parent node. */
266                 execute_sleep_set();
267
268                 DEBUG("*** Divergence point ***\n");
269
270                 diverge = NULL;
271         } else {
272                 tid = next->get_tid();
273         }
274         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
275         ASSERT(tid != THREAD_ID_T_NONE);
276         return thread_map->get(id_to_int(tid));
277 }
278
279 /**
280  * We need to know what the next action of every thread in the sleep
281  * set will be.  This method computes those actions and stores each one
282  * as the corresponding Thread's pending action.
283  */
284
285 void ModelChecker::execute_sleep_set() {
286         for(unsigned int i=0;i<get_num_threads();i++) {
287                 thread_id_t tid=int_to_id(i);
288                 Thread *thr=get_thread(tid);
289                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
290                                  thr->get_pending() == NULL ) {
291                         thr->set_state(THREAD_RUNNING);
292                         scheduler->next_thread(thr);
293                         Thread::swap(&system_context, thr);
294                         priv->current_action->set_sleep_flag();
295                         thr->set_pending(priv->current_action);
296                 }
297         }
298         priv->current_action = NULL;
299 }
300
301 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
302         for(unsigned int i=0;i<get_num_threads();i++) {
303                 thread_id_t tid=int_to_id(i);
304                 Thread *thr=get_thread(tid);
305                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
306                         ModelAction *pending_act=thr->get_pending();
307                         if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
308                                 //Remove this thread from sleep set
309                                 scheduler->remove_sleep(thr);
310                         }
311                 }
312         }
313 }
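/*
 * Illustrative sketch (user-level code, hypothetical): suppose Thread 2 sits
 * in the sleep set with a pending acquire-load of 'flag'.  When Thread 1
 * performs a release-store to 'flag', the pending load could synchronize with
 * it, so wake_up_sleeping_actions() removes Thread 2 from the sleep set:
 *
 *   // Thread 1                              // Thread 2 (sleeping, load pending)
 *   flag.store(1, memory_order_release);     r = flag.load(memory_order_acquire);
 */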
314
315 /** @brief Alert the model-checker that an incorrectly-ordered
316  * synchronization was made */
317 void ModelChecker::set_bad_synchronization()
318 {
319         priv->bad_synchronization = true;
320 }
321
322 bool ModelChecker::has_asserted() const
323 {
324         return priv->asserted;
325 }
326
327 void ModelChecker::set_assert()
328 {
329         priv->asserted = true;
330 }
331
332 /**
333  * Check if we are in a deadlock. Should only be called at the end of an
334  * execution, although it should not give false positives in the middle of an
335  * execution (there should be some ENABLED thread).
336  *
337  * @return True if program is in a deadlock; false otherwise
338  */
339 bool ModelChecker::is_deadlocked() const
340 {
341         bool blocking_threads = false;
342         for (unsigned int i = 0; i < get_num_threads(); i++) {
343                 thread_id_t tid = int_to_id(i);
344                 if (is_enabled(tid))
345                         return false;
346                 Thread *t = get_thread(tid);
347                 if (!t->is_model_thread() && t->get_pending())
348                         blocking_threads = true;
349         }
350         return blocking_threads;
351 }
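/*
 * Illustrative sketch (user-level code, hypothetical): a lock-order inversion
 * that is_deadlocked() would report once neither thread is ENABLED:
 *
 *   // Thread 1          // Thread 2
 *   m1.lock();           m2.lock();
 *   m2.lock();           m1.lock();   // both block; no thread can proceed
 */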
352
353 /**
354  * Check if this is a complete execution. That is, have all threads completed
355  * execution (rather than exiting because sleep sets have forced a redundant
356  * execution).
357  *
358  * @return True if the execution is complete.
359  */
360 bool ModelChecker::is_complete_execution() const
361 {
362         for (unsigned int i = 0; i < get_num_threads(); i++)
363                 if (is_enabled(int_to_id(i)))
364                         return false;
365         return true;
366 }
367
368 /**
369  * @brief Assert a bug in the executing program.
370  *
371  * Use this function to assert any sort of bug in the user program. If the
372  * current trace is feasible (actually, a prefix of some feasible execution),
373  * then this execution will be aborted, printing the appropriate message. If
374  * the current trace is not yet feasible, the error message will be stashed and
375  * printed if the execution ever becomes feasible.
376  *
377  * @param msg Descriptive message for the bug (do not include newline char)
378  * @return True if bug is immediately-feasible
379  */
380 bool ModelChecker::assert_bug(const char *msg)
381 {
382         priv->bugs.push_back(new bug_message(msg));
383
384         if (isfeasibleprefix()) {
385                 set_assert();
386                 return true;
387         }
388         return false;
389 }
390
391 /**
392  * @brief Assert a bug in the executing program, asserted by a user thread
393  * @see ModelChecker::assert_bug
394  * @param msg Descriptive message for the bug (do not include newline char)
395  */
396 void ModelChecker::assert_user_bug(const char *msg)
397 {
398         /* If feasible bug, bail out now */
399         if (assert_bug(msg))
400                 switch_to_master(NULL);
401 }
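/*
 * Illustrative usage sketch (hypothetical user code): a user thread reports
 * an application-level invariant violation.  If the current trace is a
 * feasible prefix, the execution aborts immediately; otherwise the message is
 * stashed and printed only if the trace ever becomes feasible:
 *
 *   if (shared_count < 0)
 *           model->assert_user_bug("shared_count went negative");
 */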
402
403 /** @return True, if any bugs have been reported for this execution */
404 bool ModelChecker::have_bug_reports() const
405 {
406         return priv->bugs.size() != 0;
407 }
408
409 /** @brief Print bug report listing for this execution (if any bugs exist) */
410 void ModelChecker::print_bugs() const
411 {
412         if (have_bug_reports()) {
413                 model_print("Bug report: %zu bug%s detected\n",
414                                 priv->bugs.size(),
415                                 priv->bugs.size() > 1 ? "s" : "");
416                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
417                         priv->bugs[i]->print();
418         }
419 }
420
421 /**
422  * @brief Record end-of-execution stats
423  *
424  * Must be run when exiting an execution. Records various stats.
425  * @see struct execution_stats
426  */
427 void ModelChecker::record_stats()
428 {
429         stats.num_total++;
430         if (!isfeasibleprefix())
431                 stats.num_infeasible++;
432         else if (have_bug_reports())
433                 stats.num_buggy_executions++;
434         else if (is_complete_execution())
435                 stats.num_complete++;
436         else
437                 stats.num_redundant++;
438 }
439
440 /** @brief Print execution stats */
441 void ModelChecker::print_stats() const
442 {
443         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
444         model_print("Number of redundant executions: %d\n", stats.num_redundant);
445         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
446         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
447         model_print("Total executions: %d\n", stats.num_total);
448         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
449 }
450
451 /**
452  * @brief End-of-execution print
453  * @param printbugs Should any existing bugs be printed?
454  */
455 void ModelChecker::print_execution(bool printbugs) const
456 {
457         print_program_output();
458
459         if (DBG_ENABLED() || params.verbose) {
460                 model_print("Earliest divergence point since last feasible execution:\n");
461                 if (earliest_diverge)
462                         earliest_diverge->print();
463                 else
464                         model_print("(Not set)\n");
465
466                 model_print("\n");
467                 print_stats();
468         }
469
470         /* Don't print invalid bugs */
471         if (printbugs)
472                 print_bugs();
473
474         model_print("\n");
475         print_summary();
476 }
477
478 /**
479  * Queries the model-checker for more executions to explore and, if one
480  * exists, resets the model-checker state to execute a new execution.
481  *
482  * @return If there are more executions to explore, return true. Otherwise,
483  * return false.
484  */
485 bool ModelChecker::next_execution()
486 {
487         DBG();
488         /* Is this execution a feasible execution that's worth bug-checking? */
489         bool complete = isfeasibleprefix() && (is_complete_execution() ||
490                         have_bug_reports());
491
492         /* End-of-execution bug checks */
493         if (complete) {
494                 if (is_deadlocked())
495                         assert_bug("Deadlock detected");
496
497                 checkDataRaces();
498         }
499
500         record_stats();
501
502         /* Output */
503         if (DBG_ENABLED() || params.verbose || have_bug_reports())
504                 print_execution(complete);
505         else
506                 clear_program_output();
507
508         if (complete)
509                 earliest_diverge = NULL;
510
511         if ((diverge = get_next_backtrack()) == NULL)
512                 return false;
513
514         if (DBG_ENABLED()) {
515                 model_print("Next execution will diverge at:\n");
516                 diverge->print();
517         }
518
519         reset_to_initial_state();
520         return true;
521 }
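/*
 * Illustrative sketch (assumed driver shape, not the actual harness): the
 * top-level loop runs the user program to completion, then asks
 * next_execution() whether another interleaving remains to explore.
 * run_one_execution() is a placeholder for the real scheduler loop:
 *
 *   do {
 *           run_one_execution();
 *   } while (model->next_execution());
 */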
522
523 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 {
525         switch (act->get_type()) {
526         case ATOMIC_FENCE:
527         case ATOMIC_READ:
528         case ATOMIC_WRITE:
529         case ATOMIC_RMW: {
530                 /* Optimization: relaxed operations don't need backtracking */
531                 if (act->is_relaxed())
532                         return NULL;
533                 /* linear search: from most recent to oldest */
534                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
535                 action_list_t::reverse_iterator rit;
536                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
537                         ModelAction *prev = *rit;
538                         if (prev->could_synchronize_with(act))
539                                 return prev;
540                 }
541                 break;
542         }
543         case ATOMIC_LOCK:
544         case ATOMIC_TRYLOCK: {
545                 /* linear search: from most recent to oldest */
546                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
547                 action_list_t::reverse_iterator rit;
548                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
549                         ModelAction *prev = *rit;
550                         if (act->is_conflicting_lock(prev))
551                                 return prev;
552                 }
553                 break;
554         }
555         case ATOMIC_UNLOCK: {
556                 /* linear search: from most recent to oldest */
557                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
558                 action_list_t::reverse_iterator rit;
559                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
560                         ModelAction *prev = *rit;
561                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
562                                 return prev;
563                 }
564                 break;
565         }
566         case ATOMIC_WAIT: {
567                 /* linear search: from most recent to oldest */
568                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
569                 action_list_t::reverse_iterator rit;
570                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
571                         ModelAction *prev = *rit;
572                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
573                                 return prev;
574                         if (!act->same_thread(prev)&&prev->is_notify())
575                                 return prev;
576                 }
577                 break;
578         }
579
580         case ATOMIC_NOTIFY_ALL:
581         case ATOMIC_NOTIFY_ONE: {
582                 /* linear search: from most recent to oldest */
583                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
584                 action_list_t::reverse_iterator rit;
585                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
586                         ModelAction *prev = *rit;
587                         if (!act->same_thread(prev)&&prev->is_wait())
588                                 return prev;
589                 }
590                 break;
591         }
592         default:
593                 break;
594         }
595         return NULL;
596 }
597
598 /** This method finds backtracking points at which we should try to
599  * reorder the given ModelAction against earlier conflicting actions.
600  *
601  * @param act The ModelAction to find backtracking points for.
602  */
603 void ModelChecker::set_backtracking(ModelAction *act)
604 {
605         Thread *t = get_thread(act);
606         ModelAction * prev = get_last_conflict(act);
607         if (prev == NULL)
608                 return;
609
610         Node * node = prev->get_node()->get_parent();
611
612         int low_tid, high_tid;
613         if (node->is_enabled(t)) {
614                 low_tid = id_to_int(act->get_tid());
615                 high_tid = low_tid+1;
616         } else {
617                 low_tid = 0;
618                 high_tid = get_num_threads();
619         }
620
621         for(int i = low_tid; i < high_tid; i++) {
622                 thread_id_t tid = int_to_id(i);
623
624                 /* Make sure this thread can be enabled here. */
625                 if (i >= node->get_num_threads())
626                         break;
627
628                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
629                 if (node->enabled_status(tid)!=THREAD_ENABLED)
630                         continue;
631
632                 /* Check if this has been explored already */
633                 if (node->has_been_explored(tid))
634                         continue;
635
636                 /* See if fairness allows */
637                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
638                         bool unfair=false;
639                         for(int t=0;t<node->get_num_threads();t++) {
640                                 thread_id_t tother=int_to_id(t);
641                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
642                                         unfair=true;
643                                         break;
644                                 }
645                         }
646                         if (unfair)
647                                 continue;
648                 }
649                 /* Cache the latest backtracking point */
650                 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
651                         priv->next_backtrack = prev;
652
653                 /* If this is a new backtracking point, mark the tree */
654                 if (!node->set_backtrack(tid))
655                         continue;
656                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
657                                         id_to_int(prev->get_tid()),
658                                         id_to_int(t->get_id()));
659                 if (DBG_ENABLED()) {
660                         prev->print();
661                         act->print();
662                 }
663         }
664 }
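/*
 * Illustrative sketch (user-level code, hypothetical): two conflicting,
 * non-relaxed accesses to the same location.  If this execution ran Thread
 * 1's store first, set_backtracking() marks a backtracking point so that a
 * later execution tries running Thread 2's load first instead:
 *
 *   // Thread 1                              // Thread 2
 *   x.store(1, memory_order_seq_cst);        r = x.load(memory_order_seq_cst);
 */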
665
666 /**
667  * Returns last backtracking point. The model checker will explore a different
668  * path for this point in the next execution.
669  * @return The ModelAction at which the next execution should diverge.
670  */
671 ModelAction * ModelChecker::get_next_backtrack()
672 {
673         ModelAction *next = priv->next_backtrack;
674         priv->next_backtrack = NULL;
675         return next;
676 }
677
678 /**
679  * Processes a read or rmw model action.
680  * @param curr is the read model action to process.
681  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
682  * @return True if processing this read updates the mo_graph.
683  */
684 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
685 {
686         uint64_t value = VALUE_NONE;
687         bool updated = false;
688         while (true) {
689                 const ModelAction *reads_from = curr->get_node()->get_read_from();
690                 if (reads_from != NULL) {
691                         mo_graph->startChanges();
692
693                         value = reads_from->get_value();
694                         bool r_status = false;
695
696                         if (!second_part_of_rmw) {
697                                 check_recency(curr, reads_from);
698                                 r_status = r_modification_order(curr, reads_from);
699                         }
700
701
702                         if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
703                                 mo_graph->rollbackChanges();
704                                 priv->too_many_reads = false;
705                                 continue;
706                         }
707
708                         read_from(curr, reads_from);
709                         mo_graph->commitChanges();
710                         mo_check_promises(curr->get_tid(), reads_from);
711
712                         updated |= r_status;
713                 } else if (!second_part_of_rmw) {
714                         /* Read from future value */
715                         value = curr->get_node()->get_future_value();
716                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
717                         read_from(curr, NULL);
718                         Promise *valuepromise = new Promise(curr, value, expiration);
719                         promises->push_back(valuepromise);
720                 }
721                 get_thread(curr)->set_return_value(value);
722                 return updated;
723         }
724 }
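/*
 * Illustrative sketch (user-level code, hypothetical): a relaxed load may
 * read from a write already in the trace (an ordinary reads-from edge) or
 * from a "future value" supplied by a write that has not executed yet, in
 * which case a Promise is created and must later be resolved by the actual
 * store:
 *
 *   // Thread 1                              // Thread 2
 *   x.store(42, memory_order_relaxed);       r = x.load(memory_order_relaxed);
 *
 * In some explored orderings Thread 2's load sees 42 before Thread 1's store
 * has been scheduled, which is modeled with a Promise rather than a concrete
 * reads-from edge.
 */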
725
726 /**
727  * Processes a lock, trylock, unlock, wait, or notify model action.
728  * @param curr is the mutex model action to process.
729  *
730  * The trylock operation checks whether the lock is taken.  If not,
731  * it falls through to the normal lock case.  If so, it returns
732  * failure.
733  *
734  * The lock operation has already been checked that it is enabled, so
735  * it just grabs the lock and synchronizes with the previous unlock.
736  *
737  * The unlock operation has to re-enable all of the threads that are
738  * waiting on the lock.
739  *
740  * @return True if synchronization was updated; false otherwise
741  */
742 bool ModelChecker::process_mutex(ModelAction *curr) {
743         std::mutex *mutex=NULL;
744         struct std::mutex_state *state=NULL;
745
746         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
747                 mutex = (std::mutex *)curr->get_location();
748                 state = mutex->get_state();
749         } else if(curr->is_wait()) {
750                 mutex = (std::mutex *)curr->get_value();
751                 state = mutex->get_state();
752         }
753
754         switch (curr->get_type()) {
755         case ATOMIC_TRYLOCK: {
756                 bool success = !state->islocked;
757                 curr->set_try_lock(success);
758                 if (!success) {
759                         get_thread(curr)->set_return_value(0);
760                         break;
761                 }
762                 get_thread(curr)->set_return_value(1);
763         }
764                 //otherwise fall through to the lock case
765         case ATOMIC_LOCK: {
766                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
767                         assert_bug("Lock access before initialization");
768                 state->islocked = true;
769                 ModelAction *unlock = get_last_unlock(curr);
770                 //synchronize with the previous unlock statement
771                 if (unlock != NULL) {
772                         curr->synchronize_with(unlock);
773                         return true;
774                 }
775                 break;
776         }
777         case ATOMIC_UNLOCK: {
778                 //unlock the lock
779                 state->islocked = false;
780                 //wake up the other threads
781                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
782                 //activate all the waiting threads
783                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
784                         scheduler->wake(get_thread(*rit));
785                 }
786                 waiters->clear();
787                 break;
788         }
789         case ATOMIC_WAIT: {
790                 //unlock the lock
791                 state->islocked = false;
792                 //wake up the other threads
793                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
794                 //activate all the waiting threads
795                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
796                         scheduler->wake(get_thread(*rit));
797                 }
798                 waiters->clear();
799                 //check whether we should go to sleep or not...simulate spurious failures
800                 if (curr->get_node()->get_misc()==0) {
801                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
802                         //disable us
803                         scheduler->sleep(get_current_thread());
804                 }
805                 break;
806         }
807         case ATOMIC_NOTIFY_ALL: {
808                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
809                 //activate all the waiting threads
810                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
811                         scheduler->wake(get_thread(*rit));
812                 }
813                 waiters->clear();
814                 break;
815         }
816         case ATOMIC_NOTIFY_ONE: {
817                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
818                 int wakeupthread=curr->get_node()->get_misc();
819                 action_list_t::iterator it = waiters->begin();
820                 advance(it, wakeupthread);
821                 scheduler->wake(get_thread(*it));
822                 waiters->erase(it);
823                 break;
824         }
825
826         default:
827                 ASSERT(0);
828         }
829         return false;
830 }
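/*
 * Illustrative sketch (user-level code, assuming the checker's
 * condition_variable::wait(mutex &) interface): the mutex/condvar operations
 * handled above, as they appear in a user program.  A failed trylock returns
 * 0, wait() releases the lock and may block on the condition variable
 * (including a modeled spurious wakeup), and notify_one() wakes a single
 * waiter chosen by the node's misc index:
 *
 *   // waiting thread
 *   m.lock();
 *   while (!ready)
 *           cv.wait(m);
 *   m.unlock();
 *
 *   // signalling thread
 *   m.lock();
 *   ready = true;
 *   cv.notify_one();
 *   m.unlock();
 */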
831
832 /**
833  * Process a write ModelAction
834  * @param curr The ModelAction to process
835  * @return True if the mo_graph was updated or promises were resolved
836  */
837 bool ModelChecker::process_write(ModelAction *curr)
838 {
839         bool updated_mod_order = w_modification_order(curr);
840         bool updated_promises = resolve_promises(curr);
841
842         if (promises->size() == 0) {
843                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
844                         struct PendingFutureValue pfv = (*futurevalues)[i];
845                         //Do more ambitious checks now that mo is more complete
846                         if (mo_may_allow(pfv.writer, pfv.act)&&
847                                         pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
848                                         (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
849                                 priv->next_backtrack = pfv.act;
850                 }
851                 futurevalues->resize(0);
852         }
853
854         mo_graph->commitChanges();
855         mo_check_promises(curr->get_tid(), curr);
856
857         get_thread(curr)->set_return_value(VALUE_NONE);
858         return updated_mod_order || updated_promises;
859 }
860
861 /**
862  * @brief Process the current action for thread-related activity
863  *
864  * Performs current-action processing for a THREAD_* ModelAction. Processing
865  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
866  * synchronization, etc.  This function is a no-op for non-THREAD actions
867  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
868  *
869  * @param curr The current action
870  * @return True if synchronization was updated or a thread completed
871  */
872 bool ModelChecker::process_thread_action(ModelAction *curr)
873 {
874         bool updated = false;
875
876         switch (curr->get_type()) {
877         case THREAD_CREATE: {
878                 Thread *th = (Thread *)curr->get_location();
879                 th->set_creation(curr);
880                 break;
881         }
882         case THREAD_JOIN: {
883                 Thread *blocking = (Thread *)curr->get_location();
884                 ModelAction *act = get_last_action(blocking->get_id());
885                 curr->synchronize_with(act);
886                 updated = true; /* trigger rel-seq checks */
887                 break;
888         }
889         case THREAD_FINISH: {
890                 Thread *th = get_thread(curr);
891                 while (!th->wait_list_empty()) {
892                         ModelAction *act = th->pop_wait_list();
893                         scheduler->wake(get_thread(act));
894                 }
895                 th->complete();
896                 updated = true; /* trigger rel-seq checks */
897                 break;
898         }
899         case THREAD_START: {
900                 check_promises(curr->get_tid(), NULL, curr->get_cv());
901                 break;
902         }
903         default:
904                 break;
905         }
906
907         return updated;
908 }
909
910 /**
911  * @brief Process the current action for release sequence fixup activity
912  *
913  * Performs model-checker release sequence fixups for the current action,
914  * forcing a single pending release sequence to break (with a given, potential
915  * "loose" write) or to complete (i.e., synchronize). If a pending release
916  * sequence forms a complete release sequence, then we must perform the fixup
917  * synchronization, mo_graph additions, etc.
918  *
919  * @param curr The current action; must be a release sequence fixup action
920  * @param work_queue The work queue to which to add work items as they are
921  * generated
922  */
923 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
924 {
925         const ModelAction *write = curr->get_node()->get_relseq_break();
926         struct release_seq *sequence = pending_rel_seqs->back();
927         pending_rel_seqs->pop_back();
928         ASSERT(sequence);
929         ModelAction *acquire = sequence->acquire;
930         const ModelAction *rf = sequence->rf;
931         const ModelAction *release = sequence->release;
932         ASSERT(acquire);
933         ASSERT(release);
934         ASSERT(rf);
935         ASSERT(release->same_thread(rf));
936
937         if (write == NULL) {
938                 /**
939                  * @todo Forcing a synchronization requires that we set
940                  * modification order constraints. For instance, we can't allow
941                  * a fixup sequence in which two separate read-acquire
942                  * operations read from the same sequence, where the first one
943                  * synchronizes and the other doesn't. Essentially, we can't
944                  * allow any writes to insert themselves between 'release' and
945                  * 'rf'
946                  */
947
948                 /* Must synchronize */
949                 if (!acquire->synchronize_with(release)) {
950                         set_bad_synchronization();
951                         return;
952                 }
953                 /* Re-check all pending release sequences */
954                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
955                 /* Re-check act for mo_graph edges */
956                 work_queue->push_back(MOEdgeWorkEntry(acquire));
957
958                 /* propagate synchronization to later actions */
959                 action_list_t::reverse_iterator rit = action_trace->rbegin();
960                 for (; (*rit) != acquire; rit++) {
961                         ModelAction *propagate = *rit;
962                         if (acquire->happens_before(propagate)) {
963                                 propagate->synchronize_with(acquire);
964                                 /* Re-check 'propagate' for mo_graph edges */
965                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
966                         }
967                 }
968         } else {
969                 /* Break release sequence with new edges:
970                  *   release --mo--> write --mo--> rf */
971                 mo_graph->addEdge(release, write);
972                 mo_graph->addEdge(write, rf);
973         }
974
975         /* See if we have realized a data race */
976         checkDataRaces();
977 }
978
979 /**
980  * Initialize the current action by performing one or more of the following
981  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
982  * in the NodeStack, manipulating backtracking sets, allocating and
983  * initializing clock vectors, and computing the promises to fulfill.
984  *
985  * @param curr The current action, as passed from the user context; may be
986  * freed/invalidated after the execution of this function, with a different
987  * action "returned" its place (pass-by-reference)
988  * @return True if curr is a newly-explored action; false otherwise
989  */
990 bool ModelChecker::initialize_curr_action(ModelAction **curr)
991 {
992         ModelAction *newcurr;
993
994         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
995                 newcurr = process_rmw(*curr);
996                 delete *curr;
997
998                 if (newcurr->is_rmw())
999                         compute_promises(newcurr);
1000
1001                 *curr = newcurr;
1002                 return false;
1003         }
1004
1005         (*curr)->set_seq_number(get_next_seq_num());
1006
1007         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1008         if (newcurr) {
1009                 /* First restore type and order in case of RMW operation */
1010                 if ((*curr)->is_rmwr())
1011                         newcurr->copy_typeandorder(*curr);
1012
1013                 ASSERT((*curr)->get_location() == newcurr->get_location());
1014                 newcurr->copy_from_new(*curr);
1015
1016                 /* Discard duplicate ModelAction; use action from NodeStack */
1017                 delete *curr;
1018
1019                 /* Always compute new clock vector */
1020                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1021
1022                 *curr = newcurr;
1023                 return false; /* Action was explored previously */
1024         } else {
1025                 newcurr = *curr;
1026
1027                 /* Always compute new clock vector */
1028                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1029
1030                 /* Assign most recent release fence */
1031                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1032
1033                 /*
1034                  * Perform one-time actions when pushing new ModelAction onto
1035                  * NodeStack
1036                  */
1037                 if (newcurr->is_write())
1038                         compute_promises(newcurr);
1039                 else if (newcurr->is_relseq_fixup())
1040                         compute_relseq_breakwrites(newcurr);
1041                 else if (newcurr->is_wait())
1042                         newcurr->get_node()->set_misc_max(2);
1043                 else if (newcurr->is_notify_one()) {
1044                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1045                 }
1046                 return true; /* This was a new ModelAction */
1047         }
1048 }
1049
1050 /**
1051  * @brief Establish reads-from relation between two actions
1052  *
1053  * Perform basic operations involved with establishing a concrete rf relation,
1054  * including setting the ModelAction data and checking for release sequences.
1055  *
1056  * @param act The action that is reading (must be a read)
1057  * @param rf The action from which we are reading (must be a write)
1058  *
1059  * @return True if this read established synchronization
1060  */
1061 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1062 {
1063         act->set_read_from(rf);
1064         if (rf != NULL && act->is_acquire()) {
1065                 rel_heads_list_t release_heads;
1066                 get_release_seq_heads(act, &release_heads);
1067                 int num_heads = release_heads.size();
1068                 for (unsigned int i = 0; i < release_heads.size(); i++)
1069                         if (!act->synchronize_with(release_heads[i])) {
1070                                 set_bad_synchronization();
1071                                 num_heads--;
1072                         }
1073                 return num_heads > 0;
1074         }
1075         return false;
1076 }
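/*
 * Illustrative sketch (user-level code, hypothetical): when the acquire load
 * below reads from the release store (or from a later store in its release
 * sequence), read_from() synchronizes the reader with the release head(s),
 * so Thread 1's earlier write to 'data' is visible to Thread 2:
 *
 *   // Thread 1                              // Thread 2
 *   data = 1;                                if (flag.load(memory_order_acquire) == 1)
 *   flag.store(1, memory_order_release);             assert(data == 1);
 */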
1077
1078 /**
1079  * @brief Check whether a model action is enabled.
1080  *
1081  * Checks whether a lock or join operation would be successful (i.e., is the
1082  * lock already locked, or is the joined thread already complete). If not, put
1083  * the action in a waiter list.
1084  *
1085  * @param curr is the ModelAction to check whether it is enabled.
1086  * @return a bool that indicates whether the action is enabled.
1087  */
1088 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1089         if (curr->is_lock()) {
1090                 std::mutex * lock = (std::mutex *)curr->get_location();
1091                 struct std::mutex_state * state = lock->get_state();
1092                 if (state->islocked) {
1093                         //Stick the action in the appropriate waiting queue
1094                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1095                         return false;
1096                 }
1097         } else if (curr->get_type() == THREAD_JOIN) {
1098                 Thread *blocking = (Thread *)curr->get_location();
1099                 if (!blocking->is_complete()) {
1100                         blocking->push_wait_list(curr);
1101                         return false;
1102                 }
1103         }
1104
1105         return true;
1106 }
1107
1108 /**
1109  * Stores the ModelAction for the current thread action.  Call this
1110  * immediately before switching from user- to system-context to pass
1111  * data between them.
1112  * @param act The ModelAction created by the user-thread action
1113  */
1114 void ModelChecker::set_current_action(ModelAction *act) {
1115         priv->current_action = act;
1116 }
1117
1118 /**
1119  * This is the heart of the model checker routine. It performs model-checking
1120  * actions corresponding to a given "current action." Among other processes, it
1121  * calculates reads-from relationships, updates synchronization clock vectors,
1122  * forms a memory_order constraints graph, and handles replay/backtrack
1123  * execution when running permutations of previously-observed executions.
1124  *
1125  * @param curr The current action to process
1126  * @return The next Thread that must be executed. May be NULL if ModelChecker
1127  * makes no choice (e.g., according to replay execution, combining RMW actions,
1128  * etc.)
1129  */
1130 Thread * ModelChecker::check_current_action(ModelAction *curr)
1131 {
1132         ASSERT(curr);
1133         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1134
1135         if (!check_action_enabled(curr)) {
1136                 /* Make the execution look like we chose to run this action
1137                  * much later, when a lock/join can succeed */
1138                 get_current_thread()->set_pending(curr);
1139                 scheduler->sleep(get_current_thread());
1140                 return get_next_thread(NULL);
1141         }
1142
1143         bool newly_explored = initialize_curr_action(&curr);
1144
1145         wake_up_sleeping_actions(curr);
1146
1147         /* Add the action to lists before any other model-checking tasks */
1148         if (!second_part_of_rmw)
1149                 add_action_to_lists(curr);
1150
1151         /* Build may_read_from set for newly-created actions */
1152         if (newly_explored && curr->is_read())
1153                 build_reads_from_past(curr);
1154
1155         /* Initialize work_queue with the "current action" work */
1156         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1157         while (!work_queue.empty() && !has_asserted()) {
1158                 WorkQueueEntry work = work_queue.front();
1159                 work_queue.pop_front();
1160
1161                 switch (work.type) {
1162                 case WORK_CHECK_CURR_ACTION: {
1163                         ModelAction *act = work.action;
1164                         bool update = false; /* update this location's release seq's */
1165                         bool update_all = false; /* update all release seq's */
1166
1167                         if (process_thread_action(curr))
1168                                 update_all = true;
1169
1170                         if (act->is_read() && process_read(act, second_part_of_rmw))
1171                                 update = true;
1172
1173                         if (act->is_write() && process_write(act))
1174                                 update = true;
1175
1176                         if (act->is_mutex_op() && process_mutex(act))
1177                                 update_all = true;
1178
1179                         if (act->is_relseq_fixup())
1180                                 process_relseq_fixup(curr, &work_queue);
1181
1182                         if (update_all)
1183                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1184                         else if (update)
1185                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1186                         break;
1187                 }
1188                 case WORK_CHECK_RELEASE_SEQ:
1189                         resolve_release_sequences(work.location, &work_queue);
1190                         break;
1191                 case WORK_CHECK_MO_EDGES: {
1192                         /** @todo Complete verification of work_queue */
1193                         ModelAction *act = work.action;
1194                         bool updated = false;
1195
1196                         if (act->is_read()) {
1197                                 const ModelAction *rf = act->get_reads_from();
1198                                 if (rf != NULL && r_modification_order(act, rf))
1199                                         updated = true;
1200                         }
1201                         if (act->is_write()) {
1202                                 if (w_modification_order(act))
1203                                         updated = true;
1204                         }
1205                         mo_graph->commitChanges();
1206
1207                         if (updated)
1208                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1209                         break;
1210                 }
1211                 default:
1212                         ASSERT(false);
1213                         break;
1214                 }
1215         }
1216
1217         check_curr_backtracking(curr);
1218         set_backtracking(curr);
1219         return get_next_thread(curr);
1220 }
1221
1222 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1223         Node *currnode = curr->get_node();
1224         Node *parnode = currnode->get_parent();
1225
1226         if ((!parnode->backtrack_empty() ||
1227                          !currnode->misc_empty() ||
1228                          !currnode->read_from_empty() ||
1229                          !currnode->future_value_empty() ||
1230                          !currnode->promise_empty() ||
1231                          !currnode->relseq_break_empty())
1232                         && (!priv->next_backtrack ||
1233                                         *curr > *priv->next_backtrack)) {
1234                 priv->next_backtrack = curr;
1235         }
1236 }
1237
1238 bool ModelChecker::promises_expired() const
1239 {
1240         for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1241                 Promise *promise = (*promises)[promise_index];
1242                 if (promise->get_expiration()<priv->used_sequence_numbers) {
1243                         return true;
1244                 }
1245         }
1246         return false;
1247 }
1248
1249 /**
1250  * This is the strongest feasibility check available.
1251  * @return whether the current trace (partial or complete) must be a prefix of
1252  * a feasible trace.
1253  */
1254 bool ModelChecker::isfeasibleprefix() const
1255 {
1256         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1257 }
1258
1259 /**
1260  * Returns whether the current completed trace is feasible, except for pending
1261  * release sequences.
1262  */
1263 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1264 {
1265         if (DBG_ENABLED() && promises->size() != 0)
1266                 DEBUG("Infeasible: unresolved promises\n");
1267
1268         return !is_infeasible() && promises->size() == 0;
1269 }
1270
1271 /**
1272  * Check if the current partial trace is infeasible. Does not check any
1273  * end-of-execution flags, which might rule out the execution. Thus, this is
1274  * useful only for ruling an execution as infeasible.
1275  * @return whether the current partial trace is infeasible.
1276  */
1277 bool ModelChecker::is_infeasible() const
1278 {
1279         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1280                 DEBUG("Infeasible: RMW violation\n");
1281
1282         return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1283 }
1284
1285 /**
1286  * Check if the current partial trace is infeasible, while ignoring
1287  * infeasibility related to two RMWs reading from the same store. It does not
1288  * check end-of-execution feasibility.
1289  * @see ModelChecker::is_infeasible
1290  * @return whether the current partial trace is infeasible, ignoring multiple
1291  * RMWs reading from the same store.
1292  * */
1293 bool ModelChecker::is_infeasible_ignoreRMW() const
1294 {
1295         if (DBG_ENABLED()) {
1296                 if (mo_graph->checkForCycles())
1297                         DEBUG("Infeasible: modification order cycles\n");
1298                 if (priv->failed_promise)
1299                         DEBUG("Infeasible: failed promise\n");
1300                 if (priv->too_many_reads)
1301                         DEBUG("Infeasible: too many reads\n");
1302                 if (priv->bad_synchronization)
1303                         DEBUG("Infeasible: bad synchronization ordering\n");
1304                 if (promises_expired())
1305                         DEBUG("Infeasible: promises expired\n");
1306         }
1307         return mo_graph->checkForCycles() || priv->failed_promise ||
1308                 priv->too_many_reads || priv->bad_synchronization ||
1309                 promises_expired();
1310 }
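/*
 * Summary of the feasibility checks above (strongest first; each level
 * implies the ones below it):
 *
 *   isfeasibleprefix()                  - is_feasible_prefix_ignore_relseq()
 *                                         plus no pending release sequences
 *   is_feasible_prefix_ignore_relseq()  - !is_infeasible() plus no unresolved promises
 *   !is_infeasible()                    - !is_infeasible_ignoreRMW() plus no RMW violations
 *   !is_infeasible_ignoreRMW()          - no mo-graph cycles, failed or expired promises,
 *                                         too-many-reads, or bad synchronization
 */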
1311
1312 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1313 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1314         ModelAction *lastread = get_last_action(act->get_tid());
1315         lastread->process_rmw(act);
1316         if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1317                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1318                 mo_graph->commitChanges();
1319         }
1320         return lastread;
1321 }
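/*
 * Illustrative note: an atomic read-modify-write such as
 *
 *   x.fetch_add(1, memory_order_relaxed);
 *
 * is modeled as two actions: an RMWR (the read half) followed by an RMW (the
 * write half), or an RMWC when the operation ends up not writing (e.g., a
 * failed compare_exchange).  process_rmw() merges that second half back into
 * the preceding RMWR.
 */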
1322
1323 /**
1324  * Checks whether a thread has read from the same write too many times
1325  * without seeing the effects of a later write.
1326  *
1327  * Basic idea:
1328  * 1) there must be a different write that we could read from that would satisfy the modification order,
1329  * 2) we must have read from the same write in excess of maxreads times, and
1330  * 3) that other write must have been in the reads_from set each of those maxreads times.
1331  *
1332  * If so, we decide that the execution is no longer feasible.
1333  */
1334 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
1335         if (params.maxreads != 0) {
1336
1337                 if (curr->get_node()->get_read_from_size() <= 1)
1338                         return;
1339                 //Must make sure that the execution is currently feasible...  We could
1340                 //accidentally clear the too_many_reads flag by rolling back an infeasible trace
1341                 if (is_infeasible())
1342                         return;
1343                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1344                 int tid = id_to_int(curr->get_tid());
1345
1346                 /* Skip checks */
1347                 if ((int)thrd_lists->size() <= tid)
1348                         return;
1349                 action_list_t *list = &(*thrd_lists)[tid];
1350
1351                 action_list_t::reverse_iterator rit = list->rbegin();
1352                 /* Skip past curr */
1353                 for (; (*rit) != curr; rit++)
1354                         ;
1355                 /* go past curr now */
1356                 rit++;
1357
1358                 action_list_t::reverse_iterator ritcopy = rit;
1359                 //See if we have enough reads from the same write
1360                 int count = 0;
1361                 for (; count < params.maxreads; rit++,count++) {
1362                         if (rit==list->rend())
1363                                 return;
1364                         ModelAction *act = *rit;
1365                         if (!act->is_read())
1366                                 return;
1367
1368                         if (act->get_reads_from() != rf)
1369                                 return;
1370                         if (act->get_node()->get_read_from_size() <= 1)
1371                                 return;
1372                 }
1373                 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1374                         //Get write
1375                         const ModelAction * write = curr->get_node()->get_read_from_at(i);
1376
1377                         //Need a different write
1378                         if (write==rf)
1379                                 continue;
1380
1381                         /* Test to see whether this is a feasible write to read from */
1382                         mo_graph->startChanges();
1383                         r_modification_order(curr, write);
1384                         bool feasiblereadfrom = !is_infeasible();
1385                         mo_graph->rollbackChanges();
1386
1387                         if (!feasiblereadfrom)
1388                                 continue;
1389                         rit = ritcopy;
1390
1391                         bool feasiblewrite = true;
1392                         //now we need to see if this write works for everyone
1393
1394                         for (int loop = count; loop>0; loop--,rit++) {
1395                                 ModelAction *act=*rit;
1396                                 bool foundvalue = false;
1397                                 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1398                                         if (act->get_node()->get_read_from_at(j)==write) {
1399                                                 foundvalue = true;
1400                                                 break;
1401                                         }
1402                                 }
1403                                 if (!foundvalue) {
1404                                         feasiblewrite = false;
1405                                         break;
1406                                 }
1407                         }
1408                         if (feasiblewrite) {
1409                                 priv->too_many_reads = true;
1410                                 return;
1411                         }
1412                 }
1413         }
1414 }
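
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names).  A user-program pattern that exercises the maxreads
 * bound above: the spin loop can keep reading the initial store of 'flag'
 * forever under relaxed semantics.  Once the reader has read that same write
 * params.maxreads times while a different feasible write (the store of 1)
 * was available each time, the execution is pruned via too_many_reads.
 *
 *   std::atomic<int> flag(0);
 *
 *   void waiter() {
 *           while (flag.load(std::memory_order_relaxed) == 0)
 *                   ;    // each iteration is one read counted above
 *   }
 *
 *   void signaller() {
 *           flag.store(1, std::memory_order_relaxed);
 *   }
 */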
1415
1416 /**
1417  * Updates the mo_graph with the constraints imposed from the current
1418  * read.
1419  *
1420  * Basic idea is the following: Go through each other thread and find
1421  * the latest action that happened before our read.  Two cases:
1422  *
1423  * (1) The action is a write => that write must either occur before
1424  * the write we read from or be the write we read from.
1425  *
1426  * (2) The action is a read => the write that that action read from
1427  * must occur before the write we read from or be the same write.
1428  *
1429  * @param curr The current action. Must be a read.
1430  * @param rf The action that curr reads from. Must be a write.
1431  * @return True if modification order edges were added; false otherwise
1432  */
1433 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1434 {
1435         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1436         unsigned int i;
1437         bool added = false;
1438         ASSERT(curr->is_read());
1439
1440         /* Last SC fence in the current thread */
1441         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1442
1443         /* Iterate over all threads */
1444         for (i = 0; i < thrd_lists->size(); i++) {
1445                 /* Last SC fence in thread i */
1446                 ModelAction *last_sc_fence_thread_local = NULL;
1447                 if (int_to_id((int)i) != curr->get_tid())
1448                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1449
1450                 /* Last SC fence in thread i, before last SC fence in current thread */
1451                 ModelAction *last_sc_fence_thread_before = NULL;
1452                 if (last_sc_fence_local)
1453                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1454
1455                 /* Iterate over actions in thread, starting from most recent */
1456                 action_list_t *list = &(*thrd_lists)[i];
1457                 action_list_t::reverse_iterator rit;
1458                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1459                         ModelAction *act = *rit;
1460
1461                         if (act->is_write() && act != rf && act != curr) {
1462                                 /* C++, Section 29.3 statement 5 */
1463                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1464                                                 *act < *last_sc_fence_thread_local) {
1465                                         mo_graph->addEdge(act, rf);
1466                                         added = true;
1467                                 }
1468                                 /* C++, Section 29.3 statement 4 */
1469                                 else if (act->is_seqcst() && last_sc_fence_local &&
1470                                                 *act < *last_sc_fence_local) {
1471                                         mo_graph->addEdge(act, rf);
1472                                         added = true;
1473                                 }
1474                                 /* C++, Section 29.3 statement 6 */
1475                                 else if (last_sc_fence_thread_before &&
1476                                                 *act < *last_sc_fence_thread_before) {
1477                                         mo_graph->addEdge(act, rf);
1478                                         added = true;
1479                                 }
1480                         }
1481
1482                         /*
1483                          * Include at most one act per-thread that "happens
1484                          * before" curr. Don't consider reflexively.
1485                          */
1486                         if (act->happens_before(curr) && act != curr) {
1487                                 if (act->is_write()) {
1488                                         if (rf != act) {
1489                                                 mo_graph->addEdge(act, rf);
1490                                                 added = true;
1491                                         }
1492                                 } else {
1493                                         const ModelAction *prevreadfrom = act->get_reads_from();
1494                                         //if the previous read is unresolved, keep going...
1495                                         if (prevreadfrom == NULL)
1496                                                 continue;
1497
1498                                         if (rf != prevreadfrom) {
1499                                                 mo_graph->addEdge(prevreadfrom, rf);
1500                                                 added = true;
1501                                         }
1502                                 }
1503                                 break;
1504                         }
1505                 }
1506         }
1507
1508         return added;
1509 }
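
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names).  Case (1) above in a small message-passing program:
 * if the acquire load of 'flag' reads the release store, the store data=1
 * happens before the load of 'data'.  When that load reads from some write W,
 * r_modification_order() adds the edge (data=1) --mo--> W (unless W is that
 * store itself); a stale candidate W already ordered before data=1 then
 * produces a modification-order cycle and the execution is ruled infeasible.
 *
 *   std::atomic<int> data(0), flag(0);
 *
 *   void producer() {
 *           data.store(1, std::memory_order_relaxed);
 *           flag.store(1, std::memory_order_release);
 *   }
 *
 *   void consumer() {
 *           if (flag.load(std::memory_order_acquire) == 1) {
 *                   int r = data.load(std::memory_order_relaxed);   // must see 1
 *           }
 *   }
 */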
1510
1511 /** This method fixes up the modification order when we resolve a
1512  *  promise.  The basic problem is that actions that occur after the
1513  *  read curr could not properly add items to the modification order
1514  *  for our read.
1515  *
1516  *  So for each thread, we find the earliest item that happens after
1517  *  the read curr.  This is the item we have to fix up with additional
1518  *  constraints.  If that action is a write, we add an MO edge between
1519  *  the Action rf and that action.  If the action is a read, we add an
1520  *  MO edge between the Action rf and whatever write the read accessed.
1521  *
1522  * @param curr is the read ModelAction that we are fixing up MO edges for.
1523  * @param rf is the write ModelAction that curr reads from.
1524  *
1525  */
1526 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1527 {
1528         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1529         unsigned int i;
1530         ASSERT(curr->is_read());
1531
1532         /* Iterate over all threads */
1533         for (i = 0; i < thrd_lists->size(); i++) {
1534                 /* Iterate over actions in thread, starting from most recent */
1535                 action_list_t *list = &(*thrd_lists)[i];
1536                 action_list_t::reverse_iterator rit;
1537                 ModelAction *lastact = NULL;
1538
1539                 /* Find the earliest action that happens after curr (allowing curr itself only if it is an RMW) */
1540                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1541                         ModelAction *act = *rit;
1542                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1543                                 lastact = act;
1544                         } else
1545                                 break;
1546                 }
1547
1548                 /* Fix up the modification order for the earliest such action, if any */
1549                 if (lastact != NULL) {
1550                         if (lastact==curr) {
1551                                 //Case 1: The resolved read is an RMW, and we need to make sure
1552                                 //that the write portion of the RMW is mod ordered after rf
1553
1554                                 mo_graph->addEdge(rf, lastact);
1555                         } else if (lastact->is_read()) {
1556                                 //Case 2: The resolved read is a normal read and the next
1557                                 //operation is a read, and we need to make sure the value read
1558                                 //is mod ordered after rf
1559
1560                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1561                                 if (postreadfrom != NULL && rf != postreadfrom)
1562                                         mo_graph->addEdge(rf, postreadfrom);
1563                         } else {
1564                                 //Case 3: The resolved read is a normal read and the next
1565                                 //operation is a write, and we need to make sure that the
1566                                 //write is mod ordered after rf
1567                                 if (lastact!=rf)
1568                                         mo_graph->addEdge(rf, lastact);
1569                         }
1570                         break;
1571                 }
1572         }
1573 }
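
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names).  Case 3 above: suppose the load in reader() is
 * speculatively given the value 42 as a promise that writer() later fulfills
 * with W = the store of 42.  The store of 5 is sequenced after that read, so
 * once the promise is resolved, post_r_modification_order() adds the missing
 * edge W --mo--> (x=5), which could not have been added when x=5 was first
 * processed because W had not executed yet.
 *
 *   std::atomic<int> x(0);
 *
 *   void writer() { x.store(42, std::memory_order_relaxed); }       // W
 *
 *   void reader() {
 *           int r = x.load(std::memory_order_relaxed);              // may promise 42
 *           x.store(5, std::memory_order_relaxed);                  // needs W --mo--> x=5
 *   }
 */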
1574
1575 /**
1576  * Updates the mo_graph with the constraints imposed from the current write.
1577  *
1578  * Basic idea is the following: Go through each other thread and find
1579  * the latest action that happened before our write.  Two cases:
1580  *
1581  * (1) The action is a write => that write must occur before
1582  * the current write
1583  *
1584  * (2) The action is a read => the write that that action read from
1585  * must occur before the current write.
1586  *
1587  * This method also handles two other issues:
1588  *
1589  * (I) Sequential Consistency: Making sure that if the current write is
1590  * seq_cst, that it occurs after the previous seq_cst write.
1591  *
1592  * (II) Sending the write back to non-synchronizing reads.
1593  *
1594  * @param curr The current action. Must be a write.
1595  * @return True if modification order edges were added; false otherwise
1596  */
1597 bool ModelChecker::w_modification_order(ModelAction *curr)
1598 {
1599         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1600         unsigned int i;
1601         bool added = false;
1602         ASSERT(curr->is_write());
1603
1604         if (curr->is_seqcst()) {
1605                 /* A seq_cst write must be ordered after the last seq_cst
1606                  * write to the same location, if one exists. */
1607                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1608                 if (last_seq_cst != NULL) {
1609                         mo_graph->addEdge(last_seq_cst, curr);
1610                         added = true;
1611                 }
1612         }
1613
1614         /* Last SC fence in the current thread */
1615         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1616
1617         /* Iterate over all threads */
1618         for (i = 0; i < thrd_lists->size(); i++) {
1619                 /* Last SC fence in thread i, before last SC fence in current thread */
1620                 ModelAction *last_sc_fence_thread_before = NULL;
1621                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1622                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1623
1624                 /* Iterate over actions in thread, starting from most recent */
1625                 action_list_t *list = &(*thrd_lists)[i];
1626                 action_list_t::reverse_iterator rit;
1627                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1628                         ModelAction *act = *rit;
1629                         if (act == curr) {
1630                                 /*
1631                                  * 1) If RMW and it actually read from something, then we
1632                                  * already have all relevant edges, so just skip to next
1633                                  * thread.
1634                                  *
1635                                  * 2) If RMW and it didn't read from anything, we should
1636                                  * take whatever edge we can get to speed up convergence.
1637                                  *
1638                                  * 3) If normal write, we need to look at earlier actions, so
1639                                  * continue processing list.
1640                                  */
1641                                 if (curr->is_rmw()) {
1642                                         if (curr->get_reads_from()!=NULL)
1643                                                 break;
1644                                         else
1645                                                 continue;
1646                                 } else
1647                                         continue;
1648                         }
1649
1650                         /* C++, Section 29.3 statement 7 */
1651                         if (last_sc_fence_thread_before && act->is_write() &&
1652                                         *act < *last_sc_fence_thread_before) {
1653                                 mo_graph->addEdge(act, curr);
1654                                 added = true;
1655                         }
1656
1657                         /*
1658                          * Include at most one act per-thread that "happens
1659                          * before" curr
1660                          */
1661                         if (act->happens_before(curr)) {
1662                                 /*
1663                                  * Note: if act is RMW, just add edge:
1664                                  *   act --mo--> curr
1665                                  * The following edge should be handled elsewhere:
1666                                  *   readfrom(act) --mo--> act
1667                                  */
1668                                 if (act->is_write())
1669                                         mo_graph->addEdge(act, curr);
1670                                 else if (act->is_read()) {
1671                                         //if the previous read's reads-from is still unresolved, just keep going
1672                                         if (act->get_reads_from() == NULL)
1673                                                 continue;
1674                                         mo_graph->addEdge(act->get_reads_from(), curr);
1675                                 }
1676                                 added = true;
1677                                 break;
1678                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1679                                                      !act->same_thread(curr)) {
1680                                 /* We have an action that:
1681                                    (1) did not happen before us
1682                                    (2) is a read and we are a write
1683                                    (3) cannot synchronize with us
1684                                    (4) is in a different thread
1685                                    =>
1686                                    that read could potentially read from our write.  Note that
1687                                    these checks are overly conservative at this point, we'll
1688                                    do more checks before actually removing the
1689                                    pendingfuturevalue.
1690
1691                                  */
1692                                 if (thin_air_constraint_may_allow(curr, act)) {
1693                                         if (!is_infeasible() ||
1694                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1695                                                 struct PendingFutureValue pfv = {curr,act};
1696                                                 futurevalues->push_back(pfv);
1697                                         }
1698                                 }
1699                         }
1700                 }
1701         }
1702
1703         return added;
1704 }
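
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names).  The "Section 29.3 statement 7" case above: the store
 * x=1 is sequenced before an SC fence in thread_a(), and another SC fence is
 * sequenced before the store x=2 in thread_b().  If thread_a()'s fence comes
 * first in the SC total order S, then x=1 must precede x=2 in the
 * modification order of x, and w_modification_order() records that edge when
 * it processes x=2.
 *
 *   std::atomic<int> x(0);
 *
 *   void thread_a() {
 *           x.store(1, std::memory_order_relaxed);
 *           std::atomic_thread_fence(std::memory_order_seq_cst);
 *   }
 *
 *   void thread_b() {
 *           std::atomic_thread_fence(std::memory_order_seq_cst);
 *           x.store(2, std::memory_order_relaxed);
 *   }
 */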
1705
1706 /** Arbitrary reads from the future are not allowed.  Section 29.3
1707  * part 9 places some constraints.  This method checks one result of that
1708  * constraint.  Others require compiler support. */
1709 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1710         if (!writer->is_rmw())
1711                 return true;
1712
1713         if (!reader->is_rmw())
1714                 return true;
1715
1716         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1717                 if (search == reader)
1718                         return false;
1719                 if (search->get_tid() == reader->get_tid() &&
1720                                 search->happens_before(reader))
1721                         break;
1722         }
1723
1724         return true;
1725 }
1726
1727 /**
1728  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1729  * some constraints. This method checks the following constraint (others
1730  * require compiler support):
1731  *
1732  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1733  */
1734 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1735 {
1736         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1737         unsigned int i;
1738         /* Iterate over all threads */
1739         for (i = 0; i < thrd_lists->size(); i++) {
1740                 const ModelAction *write_after_read = NULL;
1741
1742                 /* Iterate over actions in thread, starting from most recent */
1743                 action_list_t *list = &(*thrd_lists)[i];
1744                 action_list_t::reverse_iterator rit;
1745                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1746                         ModelAction *act = *rit;
1747
1748                         if (!reader->happens_before(act))
1749                                 break;
1750                         else if (act->is_write())
1751                                 write_after_read = act;
1752                         else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1753                                 write_after_read = act->get_reads_from();
1754                         }
1755                 }
1756
1757                 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1758                         return false;
1759         }
1760         return true;
1761 }
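
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names).  The load X is sequenced before (hence happens before)
 * the store Y.  If some other store Z ends up after Y in the modification
 * order of x, then X --hb--> Y --mo--> Z, so letting X read the future value
 * written by Z is rejected by mo_may_allow().
 *
 *   std::atomic<int> x(0);
 *
 *   void thread_a() {
 *           int r = x.load(std::memory_order_relaxed);    // X
 *           x.store(1, std::memory_order_relaxed);        // Y
 *   }
 *
 *   void thread_b() {
 *           x.store(2, std::memory_order_relaxed);        // Z: if Y --mo--> Z,
 *   }                                                     // X must not read 2
 */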
1762
1763 /**
1764  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1765  * The ModelAction under consideration is expected to be taking part in
1766  * release/acquire synchronization as an object of the "reads from" relation.
1767  * Note that this can only provide release sequence support for RMW chains
1768  * which do not read from the future, as those actions cannot be traced until
1769  * their "promise" is fulfilled. Similarly, we may not even establish the
1770  * presence of a release sequence with certainty, as some modification order
1771  * constraints may be decided further in the future. Thus, this function
1772  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1773  * and a boolean representing certainty.
1774  *
1775  * @param rf The action that might be part of a release sequence. Must be a
1776  * write.
1777  * @param release_heads A pass-by-reference style return parameter. After
1778  * execution of this function, release_heads will contain the heads of all the
1779  * relevant release sequences, if any exist with certainty.
1780  * @param pending A pass-by-reference style return parameter which is only used
1781  * when returning false (i.e., uncertain). Returns what is known about
1782  * an uncertain release sequence, including any write operations that might
1783  * break the sequence.
1784  * @return true, if the ModelChecker is certain that release_heads is complete;
1785  * false otherwise
1786  */
1787 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1788                 rel_heads_list_t *release_heads,
1789                 struct release_seq *pending) const
1790 {
1791         /* Only check for release sequences if there are no cycles */
1792         if (mo_graph->checkForCycles())
1793                 return false;
1794
1795         while (rf) {
1796                 ASSERT(rf->is_write());
1797
1798                 if (rf->is_release())
1799                         release_heads->push_back(rf);
1800                 else if (rf->get_last_fence_release())
1801                         release_heads->push_back(rf->get_last_fence_release());
1802                 if (!rf->is_rmw())
1803                         break; /* End of RMW chain */
1804
1805                 /** @todo Need to be smarter here...  In the linux lock
1806                  * example, this will run to the beginning of the program for
1807                  * every acquire. */
1808                 /** @todo The way to be smarter here is to keep going until one
1809                  * thread has a release preceded by an acquire and you've seen
1810                  * both. */
1811
1812                 /* acq_rel RMW is a sufficient stopping condition */
1813                 if (rf->is_acquire() && rf->is_release())
1814                         return true; /* complete */
1815
1816                 rf = rf->get_reads_from();
1817         }
1818         if (!rf) {
1819                 /* read from future: need to settle this later */
1820                 pending->rf = NULL;
1821                 return false; /* incomplete */
1822         }
1823
1824         if (rf->is_release())
1825                 return true; /* complete */
1826
1827         /* else relaxed write
1828          * - check for fence-release in the same thread (29.8, stmt. 3)
1829          * - check modification order for contiguous subsequence
1830          *   -> rf must be same thread as release */
1831
1832         const ModelAction *fence_release = rf->get_last_fence_release();
1833         /* Synchronize with a fence-release unconditionally; we don't need to
1834          * find any more "contiguous subsequence..." for it */
1835         if (fence_release)
1836                 release_heads->push_back(fence_release);
1837
1838         int tid = id_to_int(rf->get_tid());
1839         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1840         action_list_t *list = &(*thrd_lists)[tid];
1841         action_list_t::const_reverse_iterator rit;
1842
1843         /* Find rf in the thread list */
1844         rit = std::find(list->rbegin(), list->rend(), rf);
1845         ASSERT(rit != list->rend());
1846
1847         /* Find the last {write,fence}-release */
1848         for (; rit != list->rend(); rit++) {
1849                 if (fence_release && *(*rit) < *fence_release)
1850                         break;
1851                 if ((*rit)->is_release())
1852                         break;
1853         }
1854         if (rit == list->rend()) {
1855                 /* No write-release in this thread */
1856                 return true; /* complete */
1857         } else if (fence_release && *(*rit) < *fence_release) {
1858                 /* The fence-release is more recent (and so, "stronger") than
1859                  * the most recent write-release */
1860                 return true; /* complete */
1861         } /* else, need to establish contiguous release sequence */
1862         ModelAction *release = *rit;
1863
1864         ASSERT(rf->same_thread(release));
1865
1866         pending->writes.clear();
1867
1868         bool certain = true;
1869         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1870                 if (id_to_int(rf->get_tid()) == (int)i)
1871                         continue;
1872                 list = &(*thrd_lists)[i];
1873
1874                 /* Can we ensure no future writes from this thread may break
1875                  * the release seq? */
1876                 bool future_ordered = false;
1877
1878                 ModelAction *last = get_last_action(int_to_id(i));
1879                 Thread *th = get_thread(int_to_id(i));
1880                 if ((last && rf->happens_before(last)) ||
1881                                 !is_enabled(th) ||
1882                                 th->is_complete())
1883                         future_ordered = true;
1884
1885                 ASSERT(!th->is_model_thread() || future_ordered);
1886
1887                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1888                         const ModelAction *act = *rit;
1889                         /* Reach synchronization -> this thread is complete */
1890                         if (act->happens_before(release))
1891                                 break;
1892                         if (rf->happens_before(act)) {
1893                                 future_ordered = true;
1894                                 continue;
1895                         }
1896
1897                         /* Only non-RMW writes can break release sequences */
1898                         if (!act->is_write() || act->is_rmw())
1899                                 continue;
1900
1901                         /* Check modification order */
1902                         if (mo_graph->checkReachable(rf, act)) {
1903                                 /* rf --mo--> act */
1904                                 future_ordered = true;
1905                                 continue;
1906                         }
1907                         if (mo_graph->checkReachable(act, release))
1908                                 /* act --mo--> release */
1909                                 break;
1910                         if (mo_graph->checkReachable(release, act) &&
1911                                       mo_graph->checkReachable(act, rf)) {
1912                                 /* release --mo-> act --mo--> rf */
1913                                 return true; /* complete */
1914                         }
1915                         /* act may break release sequence */
1916                         pending->writes.push_back(act);
1917                         certain = false;
1918                 }
1919                 if (!future_ordered)
1920                         certain = false; /* This thread is uncertain */
1921         }
1922
1923         if (certain) {
1924                 release_heads->push_back(release);
1925                 pending->writes.clear();
1926         } else {
1927                 pending->release = release;
1928                 pending->rf = rf;
1929         }
1930         return certain;
1931 }
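
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names) of the two cases handled above:
 *
 * (a) RMW chain: the relaxed fetch_add continues the release sequence headed
 *     by the release store of 'x', so an acquire load that reads the
 *     fetch_add still synchronizes with that release store.
 * (b) Fence-release: the relaxed store of 'y' is preceded by a release fence
 *     in the same thread, so an acquire load that reads it synchronizes with
 *     the fence (29.8, stmt. 3, as noted above).
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   void writer_a() {
 *           x.store(1, std::memory_order_release);        // release head
 *           x.fetch_add(1, std::memory_order_relaxed);    // extends the sequence
 *   }
 *
 *   void writer_b() {
 *           std::atomic_thread_fence(std::memory_order_release);
 *           y.store(1, std::memory_order_relaxed);        // headed by the fence
 *   }
 *
 *   void reader() {
 *           x.load(std::memory_order_acquire);    // may sync with x.store(..., release)
 *           y.load(std::memory_order_acquire);    // may sync with the release fence
 *   }
 */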
1932
1933 /**
1934  * An interface for getting the release sequence head(s) with which a
1935  * given ModelAction must synchronize. This function only returns a non-empty
1936  * result when it can locate a release sequence head with certainty. Otherwise,
1937  * it may mark the internal state of the ModelChecker so that it will handle
1938  * the release sequence at a later time, causing @a act to update its
1939  * synchronization at some later point in execution.
1940  * @param act The 'acquire' action that may read from a release sequence
1941  * @param release_heads A pass-by-reference return parameter. Will be filled
1942  * with the head(s) of the release sequence(s), if they exist with certainty.
1943  * @see ModelChecker::release_seq_heads
1944  */
1945 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1946 {
1947         const ModelAction *rf = act->get_reads_from();
1948         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1949         sequence->acquire = act;
1950
1951         if (!release_seq_heads(rf, release_heads, sequence)) {
1952                 /* add act to 'lazy checking' list */
1953                 pending_rel_seqs->push_back(sequence);
1954         } else {
1955                 snapshot_free(sequence);
1956         }
1957 }
1958
1959 /**
1960  * Attempt to resolve all stashed operations that might synchronize with a
1961  * release sequence for a given location. This implements the "lazy" portion of
1962  * determining whether or not a release sequence was contiguous, since not all
1963  * modification order information is present at the time an action occurs.
1964  *
1965  * @param location The location/object that should be checked for release
1966  * sequence resolutions. A NULL value means to check all locations.
1967  * @param work_queue The work queue to which to add work items as they are
1968  * generated
1969  * @return True if any updates occurred (new synchronization, new mo_graph
1970  * edges)
1971  */
1972 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1973 {
1974         bool updated = false;
1975         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1976         while (it != pending_rel_seqs->end()) {
1977                 struct release_seq *pending = *it;
1978                 ModelAction *act = pending->acquire;
1979
1980                 /* Only resolve sequences on the given location, if provided */
1981                 if (location && act->get_location() != location) {
1982                         it++;
1983                         continue;
1984                 }
1985
1986                 const ModelAction *rf = act->get_reads_from();
1987                 rel_heads_list_t release_heads;
1988                 bool complete;
1989                 complete = release_seq_heads(rf, &release_heads, pending);
1990                 for (unsigned int i = 0; i < release_heads.size(); i++) {
1991                         if (!act->has_synchronized_with(release_heads[i])) {
1992                                 if (act->synchronize_with(release_heads[i]))
1993                                         updated = true;
1994                                 else
1995                                         set_bad_synchronization();
1996                         }
1997                 }
1998
1999                 if (updated) {
2000                         /* Re-check all pending release sequences */
2001                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2002                         /* Re-check act for mo_graph edges */
2003                         work_queue->push_back(MOEdgeWorkEntry(act));
2004
2005                         /* propagate synchronization to later actions */
2006                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2007                         for (; (*rit) != act; rit++) {
2008                                 ModelAction *propagate = *rit;
2009                                 if (act->happens_before(propagate)) {
2010                                         propagate->synchronize_with(act);
2011                                         /* Re-check 'propagate' for mo_graph edges */
2012                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2013                                 }
2014                         }
2015                 }
2016                 if (complete) {
2017                         it = pending_rel_seqs->erase(it);
2018                         snapshot_free(pending);
2019                 } else {
2020                         it++;
2021                 }
2022         }
2023
2024         // If we resolved any release sequences (new synchronization), see if we have realized a data race.
2025         checkDataRaces();
2026
2027         return updated;
2028 }
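
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names) of why resolution must be lazy.  If the acquire load
 * reads x=2, it synchronizes with the release store x=1 only if thread_b()'s
 * store x=3 is not modification-ordered between x=1 and x=2.  That may be
 * undecided when the load executes, so the acquire is stashed in
 * pending_rel_seqs and re-examined here as new mo_graph edges appear.
 *
 *   std::atomic<int> x(0);
 *
 *   void thread_a() {
 *           x.store(1, std::memory_order_release);    // candidate release head
 *           x.store(2, std::memory_order_relaxed);    // same thread: may continue the sequence
 *   }
 *
 *   void thread_b() {
 *           x.store(3, std::memory_order_relaxed);    // may break the sequence
 *   }
 *
 *   void thread_c() {
 *           int r = x.load(std::memory_order_acquire);    // if r == 2, sync depends on
 *   }                                                     // where x=3 lands in mo
 */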
2029
2030 /**
2031  * Performs various bookkeeping operations for the current ModelAction. For
2032  * instance, adds action to the per-object, per-thread action vector and to the
2033  * action trace list of all thread actions.
2034  *
2035  * @param act is the ModelAction to add.
2036  */
2037 void ModelChecker::add_action_to_lists(ModelAction *act)
2038 {
2039         int tid = id_to_int(act->get_tid());
2040         action_trace->push_back(act);
2041
2042         get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
2043
2044         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2045         if (tid >= (int)vec->size())
2046                 vec->resize(priv->next_thread_id);
2047         (*vec)[tid].push_back(act);
2048
2049         if ((int)thrd_last_action->size() <= tid)
2050                 thrd_last_action->resize(get_num_threads());
2051         (*thrd_last_action)[tid] = act;
2052
2053         if (act->is_fence() && act->is_release()) {
2054                 if ((int)thrd_last_fence_release->size() <= tid)
2055                         thrd_last_fence_release->resize(get_num_threads());
2056                 (*thrd_last_fence_release)[tid] = act;
2057         }
2058
2059         if (act->is_wait()) {
2060                 void *mutex_loc=(void *) act->get_value();
2061                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2062
2063                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2064                 if (tid >= (int)vec->size())
2065                         vec->resize(priv->next_thread_id);
2066                 (*vec)[tid].push_back(act);
2067         }
2068 }
2069
2070 /**
2071  * @brief Get the last action performed by a particular Thread
2072  * @param tid The thread ID of the Thread in question
2073  * @return The last action in the thread
2074  */
2075 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2076 {
2077         int threadid = id_to_int(tid);
2078         if (threadid < (int)thrd_last_action->size())
2079                 return (*thrd_last_action)[threadid];
2080         else
2081                 return NULL;
2082 }
2083
2084 /**
2085  * @brief Get the last fence release performed by a particular Thread
2086  * @param tid The thread ID of the Thread in question
2087  * @return The last fence release in the thread, if one exists; NULL otherwise
2088  */
2089 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2090 {
2091         int threadid = id_to_int(tid);
2092         if (threadid < (int)thrd_last_fence_release->size())
2093                 return (*thrd_last_fence_release)[threadid];
2094         else
2095                 return NULL;
2096 }
2097
2098 /**
2099  * Gets the last memory_order_seq_cst write (in the total global sequence)
2100  * performed on a particular object (i.e., memory location), not including the
2101  * current action.
2102  * @param curr The current ModelAction; also denotes the object location to
2103  * check
2104  * @return The last seq_cst write
2105  */
2106 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2107 {
2108         void *location = curr->get_location();
2109         action_list_t *list = get_safe_ptr_action(obj_map, location);
2110         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2111         action_list_t::reverse_iterator rit;
2112         for (rit = list->rbegin(); rit != list->rend(); rit++)
2113                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2114                         return *rit;
2115         return NULL;
2116 }
2117
2118 /**
2119  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2120  * performed in a particular thread, prior to a particular fence.
2121  * @param tid The ID of the thread to check
2122  * @param before_fence The fence from which to begin the search; if NULL, then
2123  * search for the most recent fence in the thread.
2124  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2125  */
2126 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2127 {
2128         /* All fences should have NULL location */
2129         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2130         action_list_t::reverse_iterator rit = list->rbegin();
2131
2132         if (before_fence) {
2133                 for (; rit != list->rend(); rit++)
2134                         if (*rit == before_fence)
2135                                 break;
2136
2137                 ASSERT(*rit == before_fence);
2138                 rit++;
2139         }
2140
2141         for (; rit != list->rend(); rit++)
2142                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2143                         return *rit;
2144         return NULL;
2145 }
2146
2147 /**
2148  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2149  * location). This function identifies the mutex according to the current
2150  * action, which is presumed to operate on the same mutex.
2151  * @param curr The current ModelAction; also denotes the object location to
2152  * check
2153  * @return The last unlock operation
2154  */
2155 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2156 {
2157         void *location = curr->get_location();
2158         action_list_t *list = get_safe_ptr_action(obj_map, location);
2159         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2160         action_list_t::reverse_iterator rit;
2161         for (rit = list->rbegin(); rit != list->rend(); rit++)
2162                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2163                         return *rit;
2164         return NULL;
2165 }
2166
2167 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2168 {
2169         ModelAction *parent = get_last_action(tid);
2170         if (!parent)
2171                 parent = get_thread(tid)->get_creation();
2172         return parent;
2173 }
2174
2175 /**
2176  * Returns the clock vector for a given thread.
2177  * @param tid The thread whose clock vector we want
2178  * @return Desired clock vector
2179  */
2180 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2181 {
2182         return get_parent_action(tid)->get_cv();
2183 }
2184
2185 /**
2186  * Resolve a set of Promises with a current write. The set is provided in the
2187  * Node corresponding to @a write.
2188  * @param write The ModelAction that is fulfilling Promises
2189  * @return True if promises were resolved; false otherwise
2190  */
2191 bool ModelChecker::resolve_promises(ModelAction *write)
2192 {
2193         bool resolved = false;
2194         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2195
2196         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2197                 Promise *promise = (*promises)[promise_index];
2198                 if (write->get_node()->get_promise(i)) {
2199                         ModelAction *read = promise->get_action();
2200                         if (read->is_rmw()) {
2201                                 mo_graph->addRMWEdge(write, read);
2202                         }
2203                         read_from(read, write);
2204                         //First fix up the modification order for actions that happened
2205                         //before the read
2206                         r_modification_order(read, write);
2207                         //Next fix up the modification order for actions that happened
2208                         //after the read.
2209                         post_r_modification_order(read, write);
2210                         //Make sure the promise's value matches the write's value
2211                         ASSERT(promise->get_value() == write->get_value());
2212                         delete(promise);
2213
2214                         promises->erase(promises->begin() + promise_index);
2215                         threads_to_check.push_back(read->get_tid());
2216
2217                         resolved = true;
2218                 } else
2219                         promise_index++;
2220         }
2221
2222         //Check whether reading these writes has made threads unable to
2223         //resolve promises
2224
2225         for(unsigned int i=0;i<threads_to_check.size();i++)
2226                 mo_check_promises(threads_to_check[i], write);
2227
2228         return resolved;
2229 }
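
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names) of where promises come from, roughly: the relaxed load
 * in reader() may be explored as returning 42 before the matching store has
 * executed.  That speculation is recorded as a Promise; when writer()'s store
 * later executes and is chosen to fulfill it, resolve_promises() binds the
 * read to the write and patches up the modification order as above.
 *
 *   std::atomic<int> x(0);
 *
 *   void writer() { x.store(42, std::memory_order_relaxed); }
 *   void reader() { int r = x.load(std::memory_order_relaxed); }    // may promise 42
 */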
2230
2231 /**
2232  * Compute the set of promises that could potentially be satisfied by this
2233  * action. Note that the set computation actually appears in the Node, not in
2234  * ModelChecker.
2235  * @param curr The ModelAction that may satisfy promises
2236  */
2237 void ModelChecker::compute_promises(ModelAction *curr)
2238 {
2239         for (unsigned int i = 0; i < promises->size(); i++) {
2240                 Promise *promise = (*promises)[i];
2241                 const ModelAction *act = promise->get_action();
2242                 if (!act->happens_before(curr) &&
2243                                 act->is_read() &&
2244                                 !act->could_synchronize_with(curr) &&
2245                                 !act->same_thread(curr) &&
2246                                 act->get_location() == curr->get_location() &&
2247                                 promise->get_value() == curr->get_value()) {
2248                         curr->get_node()->set_promise(i, act->is_rmw());
2249                 }
2250         }
2251 }
2252
2253 /** Checks promises in response to change in ClockVector Threads. */
2254 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2255 {
2256         for (unsigned int i = 0; i < promises->size(); i++) {
2257                 Promise *promise = (*promises)[i];
2258                 const ModelAction *act = promise->get_action();
2259                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2260                                 merge_cv->synchronized_since(act)) {
2261                         if (promise->increment_threads(tid)) {
2262                                 //Promise has failed
2263                                 priv->failed_promise = true;
2264                                 return;
2265                         }
2266                 }
2267         }
2268 }
2269
2270 void ModelChecker::check_promises_thread_disabled() {
2271         for (unsigned int i = 0; i < promises->size(); i++) {
2272                 Promise *promise = (*promises)[i];
2273                 if (promise->check_promise()) {
2274                         priv->failed_promise = true;
2275                         return;
2276                 }
2277         }
2278 }
2279
2280 /** Checks promises in response to addition to modification order for threads.
2281  * Definitions:
2282  * pthread is the thread that performed the read that created the promise
2283  *
2284  * pread is the read that created the promise
2285  *
2286  * pwrite is either the first write to the same location as pread by
2287  * pthread that is sequenced after pread, or the value read by the
2288  * first read to the same location as pread by pthread that is
2289  * sequenced after pread.
2290  *
2291  * 1. If tid=pthread, then we check what other threads are reachable
2292  * through the modification order starting with pwrite.  Those threads cannot
2293  * perform a write that will resolve the promise due to modification
2294  * order constraints.
2295  *
2296  * 2. If the tid is not pthread, we check whether pwrite can reach the
2297  * action write through the modification order.  If so, that thread
2298  * cannot perform a future write that will resolve the promise due to
2299  * modification order constraints.
2300  *
2301  * @param tid The thread that either read from the model action
2302  * write, or actually did the model action write.
2303  *
2304  * @param write The ModelAction representing the relevant write.
2305  */
2306
2307 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2308         void * location = write->get_location();
2309         for (unsigned int i = 0; i < promises->size(); i++) {
2310                 Promise *promise = (*promises)[i];
2311                 const ModelAction *act = promise->get_action();
2312
2313                 //Is this promise on the same location?
2314                 if ( act->get_location() != location )
2315                         continue;
2316
2317                 //same thread as the promise
2318                 if ( act->get_tid()==tid ) {
2319
2320                         //do we have a pwrite for the promise, if not, set it
2321                         if (promise->get_write() == NULL ) {
2322                                 promise->set_write(write);
2323                                 //The pwrite cannot happen before the promise
2324                                 if (write->happens_before(act) && (write != act)) {
2325                                         priv->failed_promise = true;
2326                                         return;
2327                                 }
2328                         }
2329                         if (mo_graph->checkPromise(write, promise)) {
2330                                 priv->failed_promise = true;
2331                                 return;
2332                         }
2333                 }
2334
2335                 //Don't do any lookups twice for the same thread
2336                 if (promise->has_sync_thread(tid))
2337                         continue;
2338
2339                 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2340                         if (promise->increment_threads(tid)) {
2341                                 priv->failed_promise = true;
2342                                 return;
2343                         }
2344                 }
2345         }
2346 }
2347
2348 /**
2349  * Compute the set of writes that may break the current pending release
2350  * sequence. This information is extracted from previous release sequence
2351  * calculations.
2352  *
2353  * @param curr The current ModelAction. Must be a release sequence fixup
2354  * action.
2355  */
2356 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2357 {
2358         if (pending_rel_seqs->empty())
2359                 return;
2360
2361         struct release_seq *pending = pending_rel_seqs->back();
2362         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2363                 const ModelAction *write = pending->writes[i];
2364                 curr->get_node()->add_relseq_break(write);
2365         }
2366
2367         /* NULL means don't break the sequence; just synchronize */
2368         curr->get_node()->add_relseq_break(NULL);
2369 }
2370
2371 /**
2372  * Build up an initial set of all past writes that this 'read' action may read
2373  * from. This set is determined by the clock vector's "happens before"
2374  * relationship.
2375  * @param curr is the current ModelAction that we are exploring; it must be a
2376  * 'read' operation.
2377  */
2378 void ModelChecker::build_reads_from_past(ModelAction *curr)
2379 {
2380         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2381         unsigned int i;
2382         ASSERT(curr->is_read());
2383
2384         ModelAction *last_sc_write = NULL;
2385
2386         /* Track whether this object has been initialized */
2387         bool initialized = false;
2388
2389         if (curr->is_seqcst()) {
2390                 last_sc_write = get_last_seq_cst_write(curr);
2391                 /* We have to at least see the last sequentially consistent write,
2392                  * so we are initialized. */
2393                 if (last_sc_write != NULL)
2394                         initialized = true;
2395         }
2396
2397         /* Iterate over all threads */
2398         for (i = 0; i < thrd_lists->size(); i++) {
2399                 /* Iterate over actions in thread, starting from most recent */
2400                 action_list_t *list = &(*thrd_lists)[i];
2401                 action_list_t::reverse_iterator rit;
2402                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2403                         ModelAction *act = *rit;
2404
2405                         /* Only consider 'write' actions */
2406                         if (!act->is_write() || act == curr)
2407                                 continue;
2408
2409                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2410                         bool allow_read = true;
2411
2412                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2413                                 allow_read = false;
2414                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2415                                 allow_read = false;
2416
2417                         if (allow_read) {
2418                                 DEBUG("Adding action to may_read_from:\n");
2419                                 if (DBG_ENABLED()) {
2420                                         act->print();
2421                                         curr->print();
2422                                 }
2423                                 curr->get_node()->add_read_from(act);
2424                         }
2425
2426                         /* Include at most one act per-thread that "happens before" curr */
2427                         if (act->happens_before(curr)) {
2428                                 initialized = true;
2429                                 break;
2430                         }
2431                 }
2432         }
2433
2434         if (!initialized)
2435                 assert_bug("May read from uninitialized atomic");
2436
2437         if (DBG_ENABLED() || !initialized) {
2438                 model_print("Reached read action:\n");
2439                 curr->print();
2440                 model_print("Printing may_read_from\n");
2441                 curr->get_node()->print_may_read_from();
2442                 model_print("End printing may_read_from\n");
2443         }
2444 }
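
/*
 * Illustrative sketch (not part of the checker; assumes <atomic> and
 * hypothetical names).  For the load of 'data' in reader(), the reverse walk
 * above stops at the first write per thread that happens before the read;
 * with the acquire/release synchronization established, that is data=2, so
 * the older data=1 from the same thread never enters may_read_from.  A stale
 * initial value written by a different thread would still be added here and
 * only ruled out later by the modification-order checks.
 *
 *   std::atomic<int> data(0), flag(0);
 *
 *   void writer() {
 *           data.store(1, std::memory_order_relaxed);
 *           data.store(2, std::memory_order_relaxed);
 *           flag.store(1, std::memory_order_release);
 *   }
 *
 *   void reader() {
 *           while (flag.load(std::memory_order_acquire) != 1)
 *                   ;
 *           int r = data.load(std::memory_order_relaxed);
 *   }
 */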
2445
/**
 * Determine whether the thread performing the (sleep-flagged) read @a curr
 * may read from @a write: walks back @a write's RMW chain looking for a
 * release store executed while that thread was in the sleep set.
 */
2446 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2447         while (true) {
2448                 Node *prevnode = write->get_node()->get_parent();
2449
2450                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2451                 if (write->is_release() && thread_sleep)
2452                         return true;
2453                 if (!write->is_rmw()) {
2454                         return false;
2455                 }
2456                 if (write->get_reads_from() == NULL)
2457                         return true;
2458                 write = write->get_reads_from();
2459         }
2460 }
2461
2462 static void print_list(action_list_t *list, int exec_num = -1)
2463 {
2464         action_list_t::iterator it;
2465
2466         model_print("---------------------------------------------------------------------\n");
2467         if (exec_num >= 0)
2468                 model_print("Execution %d:\n", exec_num);
2469
2470         unsigned int hash=0;
2471
2472         for (it = list->begin(); it != list->end(); it++) {
2473                 (*it)->print();
2474                 hash=hash^(hash<<3)^((*it)->hash());
2475         }
2476         model_print("HASH %u\n", hash);
2477         model_print("---------------------------------------------------------------------\n");
2478 }
2479
2480 #if SUPPORT_MOD_ORDER_DUMP
2481 void ModelChecker::dumpGraph(char *filename) {
2482         char buffer[200];
2483         sprintf(buffer, "%s.dot",filename);
2484         FILE *file=fopen(buffer, "w");
2485         fprintf(file, "digraph %s {\n",filename);
2486         mo_graph->dumpNodes(file);
2487         ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2488
2489         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2490                 ModelAction *action=*it;
2491                 if (action->is_read()) {
2492                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2493                         if (action->get_reads_from()!=NULL)
2494                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2495                 }
2496                 if (thread_array[action->get_tid()] != NULL) {
2497                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2498                 }
2499
2500                 thread_array[action->get_tid()]=action;
2501         }
2502         fprintf(file,"}\n");
2503         model_free(thread_array);
2504         fclose(file);
2505 }
2506 #endif
2507
2508 /** @brief Prints an execution trace summary. */
2509 void ModelChecker::print_summary() const
2510 {
2511 #if SUPPORT_MOD_ORDER_DUMP
2512         scheduler->print();
2513         char buffername[100];
2514         sprintf(buffername, "exec%04u", stats.num_total);
2515         mo_graph->dumpGraphToFile(buffername);
2516         sprintf(buffername, "graph%04u", stats.num_total);
2517         dumpGraph(buffername);
2518 #endif
2519
2520         if (!isfeasibleprefix())
2521                 model_print("INFEASIBLE EXECUTION!\n");
2522         print_list(action_trace, stats.num_total);
2523         model_print("\n");
2524 }
2525
2526 /**
2527  * Add a Thread to the system for the first time. Should only be called once
2528  * per thread.
2529  * @param t The Thread to add
2530  */
2531 void ModelChecker::add_thread(Thread *t)
2532 {
2533         thread_map->put(id_to_int(t->get_id()), t);
2534         scheduler->add_thread(t);
2535 }
2536
2537 /**
2538  * Removes a thread from the scheduler.
2539  * @param t The Thread to remove
2540  */
2541 void ModelChecker::remove_thread(Thread *t)
2542 {
2543         scheduler->remove_thread(t);
2544 }
2545
2546 /**
2547  * @brief Get a Thread reference by its ID
2548  * @param tid The Thread's ID
2549  * @return A Thread reference
2550  */
2551 Thread * ModelChecker::get_thread(thread_id_t tid) const
2552 {
2553         return thread_map->get(id_to_int(tid));
2554 }
2555
2556 /**
2557  * @brief Get a reference to the Thread in which a ModelAction was executed
2558  * @param act The ModelAction
2559  * @return A Thread reference
2560  */
2561 Thread * ModelChecker::get_thread(ModelAction *act) const
2562 {
2563         return get_thread(act->get_tid());
2564 }
2565
2566 /**
2567  * @brief Check if a Thread is currently enabled
2568  * @param t The Thread to check
2569  * @return True if the Thread is currently enabled
2570  */
2571 bool ModelChecker::is_enabled(Thread *t) const
2572 {
2573         return scheduler->is_enabled(t);
2574 }
2575
2576 /**
2577  * @brief Check if a Thread is currently enabled
2578  * @param tid The ID of the Thread to check
2579  * @return True if the Thread is currently enabled
2580  */
2581 bool ModelChecker::is_enabled(thread_id_t tid) const
2582 {
2583         return scheduler->is_enabled(tid);
2584 }
2585
2586 /**
2587  * Switch from a user-context to the "master thread" context (a.k.a. system
2588  * context). This switch is made with the intention of exploring a particular
2589  * model-checking action (described by a ModelAction object). Must be called
2590  * from a user-thread context.
2591  *
2592  * @param act The current action that will be explored. May be NULL only if
2593  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2594  * ModelChecker::has_asserted).
2595  * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2596  */
2597 int ModelChecker::switch_to_master(ModelAction *act)
2598 {
2599         DBG();
2600         Thread *old = thread_current();
2601         set_current_action(act);
2602         old->set_state(THREAD_READY);
2603         return Thread::swap(old, &system_context);
2604 }
2605
2606 /**
2607  * Takes the next step in the execution, if possible.
2608  * @return True if a step was taken; false if the execution should halt (assertion, infeasibility, step bound exceeded, or no runnable thread)
2609  */
2610 bool ModelChecker::take_step() {
2611         if (has_asserted())
2612                 return false;
2613
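             /* Process the action submitted by the previously scheduled thread, if any */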
2614         Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2615         if (curr) {
2616                 if (curr->get_state() == THREAD_READY) {
2617                         ASSERT(priv->current_action);
2618
2619                         priv->nextThread = check_current_action(priv->current_action);
2620                         priv->current_action = NULL;
2621
2622                         if (curr->is_blocked() || curr->is_complete())
2623                                 scheduler->remove_thread(curr);
2624                 } else {
2625                         ASSERT(false);
2626                 }
2627         }
2628         Thread *next = scheduler->next_thread(priv->nextThread);
2629
2630         /* Infeasible -> don't take any more steps */
2631         if (is_infeasible())
2632                 return false;
2633         else if (isfeasibleprefix() && have_bug_reports()) {
2634                 set_assert();
2635                 return false;
2636         }
2637
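             /* Stop exploring this execution once it passes the user-specified
              * step bound (bound == 0 means unbounded) */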
2638         if (params.bound != 0) {
2639                 if (priv->used_sequence_numbers > params.bound) {
2640                         return false;
2641                 }
2642         }
2643
2644         DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2645                         next ? id_to_int(next->get_id()) : -1);
2646
2647         /*
2648          * Launch end-of-execution release sequence fixups only when there are:
2649          *
2650          * (1) no more user threads to run (or when execution replay chooses
2651          *     the 'model_thread')
2652          * (2) pending release sequences
2653          * (3) pending assertions (i.e., data races)
2654          * (4) no pending promises
2655          */
2656         if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2657                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2658                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2659                                 pending_rel_seqs->size());
2660                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2661                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2662                                 model_thread);
2663                 set_current_action(fixup);
2664                 return true;
2665         }
2666
2667         /* next == NULL -> don't take any more steps */
2668         if (!next)
2669                 return false;
2670
2671         next->set_state(THREAD_RUNNING);
2672
2673         if (next->get_pending() != NULL) {
2674                 /* restart a pending action */
2675                 set_current_action(next->get_pending());
2676                 next->set_pending(NULL);
2677                 next->set_state(THREAD_READY);
2678                 return true;
2679         }
2680
2681         /* Return false only if swap fails with an error */
2682         return (Thread::swap(&system_context, next) == 0);
2683 }
2684
2685 /** Wrapper to run the user's main function, with appropriate arguments */
2686 void user_main_wrapper(void *)
2687 {
2688         user_main(model->params.argc, model->params.argv);
2689 }
2690
2691 /** @brief Run ModelChecker for the user program */
2692 void ModelChecker::run()
2693 {
2694         do {
2695                 thrd_t user_thread;
2696
2697                 /* Start user program */
2698                 add_thread(new Thread(&user_thread, &user_main_wrapper, NULL));
2699
2700                 /* Wait for all threads to complete */
2701                 while (take_step());
2702         } while (next_execution());
2703
2704         print_stats();
2705 }