1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218  * might guide the choice of next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise NULL, if
221  * no threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* The correct sleep set is in the parent node. */
286                 execute_sleep_set();
287
288                 DEBUG("*** Divergence point ***\n");
289
290                 diverge = NULL;
291         } else {
292                 tid = next->get_tid();
293         }
294         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
295         ASSERT(tid != THREAD_ID_T_NONE);
296         return thread_map->get(id_to_int(tid));
297 }
298
299 /**
300  * We need to know what the next actions of all threads in the sleep
301  * set will be. This method marks each such thread's pending action
302  * with the sleep flag.
303  */
304
305 void ModelChecker::execute_sleep_set()
306 {
307         for (unsigned int i = 0; i < get_num_threads(); i++) {
308                 thread_id_t tid = int_to_id(i);
309                 Thread *thr = get_thread(tid);
310                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
311                         thr->get_pending()->set_sleep_flag();
312                 }
313         }
314 }
315
316 /**
317  * @brief Should the current action wake up a given thread?
318  *
319  * @param curr The current action
320  * @param thread The thread that we might wake up
321  * @return True, if we should wake up the sleeping thread; false otherwise
322  */
323 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
324 {
325         const ModelAction *asleep = thread->get_pending();
326         /* Don't allow partial RMW to wake anyone up */
327         if (curr->is_rmwr())
328                 return false;
329         /* Synchronizing actions may have been backtracked */
330         if (asleep->could_synchronize_with(curr))
331                 return true;
332         /* A sleeping fence-acquire may be awakened by any release operation or fence */
333         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
334                 return true;
335         /* Fence-release + store can wake a load-acquire on the same location */
336         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
337                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
338                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
339                         return true;
340         }
341         return false;
342 }
343
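/*
 * Editorial note, illustrative only: the fence-release + store case above
 * corresponds to a user-program pattern like the following (written against
 * the C++11 atomics interface the checker models; function names are
 * hypothetical). If thread 2's load-acquire is asleep, thread 1's relaxed
 * store that is sequenced after its release fence can establish
 * synchronization, so the sleeping load should be woken and the interaction
 * explored.
 *
 *     #include <atomic>
 *     std::atomic<int> flag(0);
 *
 *     void thread1() {
 *         std::atomic_thread_fence(std::memory_order_release);
 *         flag.store(1, std::memory_order_relaxed);      // store after fence-release
 *     }
 *
 *     void thread2() {
 *         int r = flag.load(std::memory_order_acquire);  // load-acquire, same location
 *         (void)r;
 *     }
 */
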
344 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
345 {
346         for (unsigned int i = 0; i < get_num_threads(); i++) {
347                 Thread *thr = get_thread(int_to_id(i));
348                 if (scheduler->is_sleep_set(thr)) {
349                         if (should_wake_up(curr, thr))
350                                 /* Remove this thread from sleep set */
351                                 scheduler->remove_sleep(thr);
352                 }
353         }
354 }
355
356 /** @brief Alert the model-checker that an incorrectly-ordered
357  * synchronization was made */
358 void ModelChecker::set_bad_synchronization()
359 {
360         priv->bad_synchronization = true;
361 }
362
363 /**
364  * Check whether the current trace has triggered an assertion which should halt
365  * its execution.
366  *
367  * @return True, if the execution should be aborted; false otherwise
368  */
369 bool ModelChecker::has_asserted() const
370 {
371         return priv->asserted;
372 }
373
374 /**
375  * Trigger a trace assertion which should cause this execution to be halted.
376  * This can be due to a detected bug or due to an infeasibility that should
377  * halt ASAP.
378  */
379 void ModelChecker::set_assert()
380 {
381         priv->asserted = true;
382 }
383
384 /**
385  * Check if we are in a deadlock. Should only be called at the end of an
386  * execution, although it should not give false positives in the middle of an
387  * execution (there should be some ENABLED thread).
388  *
389  * @return True if program is in a deadlock; false otherwise
390  */
391 bool ModelChecker::is_deadlocked() const
392 {
393         bool blocking_threads = false;
394         for (unsigned int i = 0; i < get_num_threads(); i++) {
395                 thread_id_t tid = int_to_id(i);
396                 if (is_enabled(tid))
397                         return false;
398                 Thread *t = get_thread(tid);
399                 if (!t->is_model_thread() && t->get_pending())
400                         blocking_threads = true;
401         }
402         return blocking_threads;
403 }
404
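/*
 * Editorial note, illustrative only: a user program that this end-of-execution
 * check would flag. Both threads block on a lock the other holds, so no thread
 * is enabled while non-model threads still have pending actions. Thread
 * creation is elided; the function names are hypothetical.
 *
 *     #include <mutex>
 *     std::mutex a, b;
 *
 *     void thread1() { a.lock(); b.lock(); b.unlock(); a.unlock(); }
 *     void thread2() { b.lock(); a.lock(); a.unlock(); b.unlock(); }
 */
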
405 /**
406  * Check if this is a complete execution. That is, have all thread completed
407  * execution (rather than exiting because sleep sets have forced a redundant
408  * execution).
409  *
410  * @return True if the execution is complete.
411  */
412 bool ModelChecker::is_complete_execution() const
413 {
414         for (unsigned int i = 0; i < get_num_threads(); i++)
415                 if (is_enabled(int_to_id(i)))
416                         return false;
417         return true;
418 }
419
420 /**
421  * @brief Assert a bug in the executing program.
422  *
423  * Use this function to assert any sort of bug in the user program. If the
424  * current trace is feasible (actually, a prefix of some feasible execution),
425  * then this execution will be aborted, printing the appropriate message. If
426  * the current trace is not yet feasible, the error message will be stashed and
427  * printed if the execution ever becomes feasible.
428  *
429  * @param msg Descriptive message for the bug (do not include newline char)
430  * @return True if bug is immediately-feasible
431  */
432 bool ModelChecker::assert_bug(const char *msg)
433 {
434         priv->bugs.push_back(new bug_message(msg));
435
436         if (isfeasibleprefix()) {
437                 set_assert();
438                 return true;
439         }
440         return false;
441 }
442
443 /**
444  * @brief Assert a bug in the executing program, asserted by a user thread
445  * @see ModelChecker::assert_bug
446  * @param msg Descriptive message for the bug (do not include newline char)
447  */
448 void ModelChecker::assert_user_bug(const char *msg)
449 {
450         /* If feasible bug, bail out now */
451         if (assert_bug(msg))
452                 switch_to_master(NULL);
453 }
454
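/*
 * Editorial note, illustrative only: user code can report a violated invariant
 * through the global `model` pointer declared at the top of this file. If the
 * current trace is already a feasible prefix, execution halts immediately;
 * otherwise the message is stashed until feasibility is known. The helper name
 * below is hypothetical.
 *
 *     void check_invariant(int observed) {
 *         if (observed != 42)
 *             model->assert_user_bug("invariant violated: observed != 42");
 *     }
 */
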
455 /** @return True, if any bugs have been reported for this execution */
456 bool ModelChecker::have_bug_reports() const
457 {
458         return priv->bugs.size() != 0;
459 }
460
461 /** @brief Print bug report listing for this execution (if any bugs exist) */
462 void ModelChecker::print_bugs() const
463 {
464         if (have_bug_reports()) {
465                 model_print("Bug report: %zu bug%s detected\n",
466                                 priv->bugs.size(),
467                                 priv->bugs.size() > 1 ? "s" : "");
468                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
469                         priv->bugs[i]->print();
470         }
471 }
472
473 /**
474  * @brief Record end-of-execution stats
475  *
476  * Must be run when exiting an execution. Records various stats.
477  * @see struct execution_stats
478  */
479 void ModelChecker::record_stats()
480 {
481         stats.num_total++;
482         if (!isfeasibleprefix())
483                 stats.num_infeasible++;
484         else if (have_bug_reports())
485                 stats.num_buggy_executions++;
486         else if (is_complete_execution())
487                 stats.num_complete++;
488         else
489                 stats.num_redundant++;
490 }
491
492 /** @brief Print execution stats */
493 void ModelChecker::print_stats() const
494 {
495         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
496         model_print("Number of redundant executions: %d\n", stats.num_redundant);
497         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
498         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
499         model_print("Total executions: %d\n", stats.num_total);
500         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
501 }
502
503 /**
504  * @brief End-of-execution print
505  * @param printbugs Should any existing bugs be printed?
506  */
507 void ModelChecker::print_execution(bool printbugs) const
508 {
509         print_program_output();
510
511         if (DBG_ENABLED() || params.verbose) {
512                 model_print("Earliest divergence point since last feasible execution:\n");
513                 if (earliest_diverge)
514                         earliest_diverge->print();
515                 else
516                         model_print("(Not set)\n");
517
518                 model_print("\n");
519                 print_stats();
520         }
521
522         /* Don't print invalid bugs */
523         if (printbugs)
524                 print_bugs();
525
526         model_print("\n");
527         print_summary();
528 }
529
530 /**
531  * Queries the model-checker for more executions to explore and, if one
532  * exists, resets the model-checker state to execute a new execution.
533  *
534  * @return If there are more executions to explore, return true. Otherwise,
535  * return false.
536  */
537 bool ModelChecker::next_execution()
538 {
539         DBG();
540         /* Is this execution a feasible execution that's worth bug-checking? */
541         bool complete = isfeasibleprefix() && (is_complete_execution() ||
542                         have_bug_reports());
543
544         /* End-of-execution bug checks */
545         if (complete) {
546                 if (is_deadlocked())
547                         assert_bug("Deadlock detected");
548
549                 checkDataRaces();
550         }
551
552         record_stats();
553
554         /* Output */
555         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
556                 print_execution(complete);
557         else
558                 clear_program_output();
559
560         if (complete)
561                 earliest_diverge = NULL;
562
563         if ((diverge = get_next_backtrack()) == NULL)
564                 return false;
565
566         if (DBG_ENABLED()) {
567                 model_print("Next execution will diverge at:\n");
568                 diverge->print();
569         }
570
571         reset_to_initial_state();
572         return true;
573 }
574
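/*
 * Editorial note, a minimal sketch (not the actual entry point, which lives
 * elsewhere in the tree): a driver would typically run the user program once,
 * then call next_execution() to decide whether to explore another
 * interleaving. run_one_execution() is hypothetical shorthand for executing
 * the user program to completion under the scheduler.
 *
 *     do {
 *         run_one_execution();
 *     } while (model->next_execution());   // false once no backtracking points remain
 */
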
575 /**
576  * @brief Find the last fence-related backtracking conflict for a ModelAction
577  *
578  * This function performs the search for the most recent conflicting action
579  * against which we should perform backtracking, as affected by fence
580  * operations. This includes pairs of potentially-synchronizing actions which
581  * occur due to fence-acquire or fence-release, and hence should be explored in
582  * the opposite execution order.
583  *
584  * @param act The current action
585  * @return The most recent action which conflicts with act due to fences
586  */
587 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
588 {
589         /* Only perform release/acquire fence backtracking for stores */
590         if (!act->is_write())
591                 return NULL;
592
593         /* Find a fence-release (or, act is a release) */
594         ModelAction *last_release;
595         if (act->is_release())
596                 last_release = act;
597         else
598                 last_release = get_last_fence_release(act->get_tid());
599         if (!last_release)
600                 return NULL;
601
602         /* Skip past the release */
603         action_list_t *list = action_trace;
604         action_list_t::reverse_iterator rit;
605         for (rit = list->rbegin(); rit != list->rend(); rit++)
606                 if (*rit == last_release)
607                         break;
608         ASSERT(rit != list->rend());
609
610         /* Find a prior:
611          *   load-acquire
612          * or
613          *   load --sb-> fence-acquire */
614         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
615         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
616         bool found_acquire_fences = false;
617         for ( ; rit != list->rend(); rit++) {
618                 ModelAction *prev = *rit;
619                 if (act->same_thread(prev))
620                         continue;
621
622                 int tid = id_to_int(prev->get_tid());
623
624                 if (prev->is_read() && act->same_var(prev)) {
625                         if (prev->is_acquire()) {
626                                 /* Found most recent load-acquire, don't need
627                                  * to search for more fences */
628                                 if (!found_acquire_fences)
629                                         return NULL;
630                         } else {
631                                 prior_loads[tid] = prev;
632                         }
633                 }
634                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
635                         found_acquire_fences = true;
636                         acquire_fences[tid] = prev;
637                 }
638         }
639
640         ModelAction *latest_backtrack = NULL;
641         for (unsigned int i = 0; i < acquire_fences.size(); i++)
642                 if (acquire_fences[i] && prior_loads[i])
643                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
644                                 latest_backtrack = acquire_fences[i];
645         return latest_backtrack;
646 }
647
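/*
 * Editorial note, illustrative only: the shape of conflict this search
 * targets. With `act` being thread 1's relaxed store (sequenced after a
 * release fence), a prior relaxed load of the same variable in thread 2 that
 * is followed by an acquire fence is a potential synchronization partner, so
 * the pair should also be explored in the opposite order. Variable names are
 * hypothetical.
 *
 *     // Thread 1 (act is the store):
 *     std::atomic_thread_fence(std::memory_order_release);
 *     x.store(1, std::memory_order_relaxed);
 *
 *     // Thread 2 (load --sb-> fence-acquire):
 *     int r = x.load(std::memory_order_relaxed);
 *     std::atomic_thread_fence(std::memory_order_acquire);
 */
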
648 /**
649  * @brief Find the last backtracking conflict for a ModelAction
650  *
651  * This function performs the search for the most recent conflicting action
652  * against which we should perform backtracking. This primarily includes pairs of
653  * synchronizing actions which should be explored in the opposite execution
654  * order.
655  *
656  * @param act The current action
657  * @return The most recent action which conflicts with act
658  */
659 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
660 {
661         switch (act->get_type()) {
662         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
663         case ATOMIC_READ:
664         case ATOMIC_WRITE:
665         case ATOMIC_RMW: {
666                 ModelAction *ret = NULL;
667
668                 /* linear search: from most recent to oldest */
669                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
670                 action_list_t::reverse_iterator rit;
671                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
672                         ModelAction *prev = *rit;
673                         if (prev->could_synchronize_with(act)) {
674                                 ret = prev;
675                                 break;
676                         }
677                 }
678
679                 ModelAction *ret2 = get_last_fence_conflict(act);
680                 if (!ret2)
681                         return ret;
682                 if (!ret)
683                         return ret2;
684                 if (*ret < *ret2)
685                         return ret2;
686                 return ret;
687         }
688         case ATOMIC_LOCK:
689         case ATOMIC_TRYLOCK: {
690                 /* linear search: from most recent to oldest */
691                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
692                 action_list_t::reverse_iterator rit;
693                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
694                         ModelAction *prev = *rit;
695                         if (act->is_conflicting_lock(prev))
696                                 return prev;
697                 }
698                 break;
699         }
700         case ATOMIC_UNLOCK: {
701                 /* linear search: from most recent to oldest */
702                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
703                 action_list_t::reverse_iterator rit;
704                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
705                         ModelAction *prev = *rit;
706                         if (!act->same_thread(prev) && prev->is_failed_trylock())
707                                 return prev;
708                 }
709                 break;
710         }
711         case ATOMIC_WAIT: {
712                 /* linear search: from most recent to oldest */
713                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
714                 action_list_t::reverse_iterator rit;
715                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
716                         ModelAction *prev = *rit;
717                         if (!act->same_thread(prev) && prev->is_failed_trylock())
718                                 return prev;
719                         if (!act->same_thread(prev) && prev->is_notify())
720                                 return prev;
721                 }
722                 break;
723         }
724
725         case ATOMIC_NOTIFY_ALL:
726         case ATOMIC_NOTIFY_ONE: {
727                 /* linear search: from most recent to oldest */
728                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
729                 action_list_t::reverse_iterator rit;
730                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
731                         ModelAction *prev = *rit;
732                         if (!act->same_thread(prev) && prev->is_wait())
733                                 return prev;
734                 }
735                 break;
736         }
737         default:
738                 break;
739         }
740         return NULL;
741 }
742
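/*
 * Editorial note, illustrative only: the simplest conflict found here is a
 * pair of potentially-synchronizing operations on the same location, such as
 * a store-release and a load-acquire. If the load ran before the store in
 * this execution, the store's conflict search returns the load so a
 * backtracking point can be set and the opposite order explored as well.
 *
 *     std::atomic<int> x(0);
 *     // Thread 1:  x.store(1, std::memory_order_release);
 *     // Thread 2:  int r = x.load(std::memory_order_acquire);
 */
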
743 /** This method finds backtracking points against which we should try to
744  * reorder the parameter ModelAction.
745  *
746  * @param act The ModelAction to find backtracking points for.
747  */
748 void ModelChecker::set_backtracking(ModelAction *act)
749 {
750         Thread *t = get_thread(act);
751         ModelAction *prev = get_last_conflict(act);
752         if (prev == NULL)
753                 return;
754
755         Node *node = prev->get_node()->get_parent();
756
757         int low_tid, high_tid;
758         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
759                 low_tid = id_to_int(act->get_tid());
760                 high_tid = low_tid + 1;
761         } else {
762                 low_tid = 0;
763                 high_tid = get_num_threads();
764         }
765
766         for (int i = low_tid; i < high_tid; i++) {
767                 thread_id_t tid = int_to_id(i);
768
769                 /* Make sure this thread can be enabled here. */
770                 if (i >= node->get_num_threads())
771                         break;
772
773                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774                 if (node->enabled_status(tid) != THREAD_ENABLED)
775                         continue;
776
777                 /* Check if this has been explored already */
778                 if (node->has_been_explored(tid))
779                         continue;
780
781                 /* See if fairness allows */
782                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
783                         bool unfair = false;
784                         for (int t = 0; t < node->get_num_threads(); t++) {
785                                 thread_id_t tother = int_to_id(t);
786                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
787                                         unfair = true;
788                                         break;
789                                 }
790                         }
791                         if (unfair)
792                                 continue;
793                 }
794                 /* Cache the latest backtracking point */
795                 set_latest_backtrack(prev);
796
797                 /* If this is a new backtracking point, mark the tree */
798                 if (!node->set_backtrack(tid))
799                         continue;
800                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
801                                         id_to_int(prev->get_tid()),
802                                         id_to_int(t->get_id()));
803                 if (DBG_ENABLED()) {
804                         prev->print();
805                         act->print();
806                 }
807         }
808 }
809
810 /**
811  * @brief Cache a backtracking point as the "most recent", if eligible
812  *
813  * Note that this does not prepare the NodeStack for this backtracking
814  * operation; it only caches the action on a per-execution basis.
815  *
816  * @param act The operation at which we should explore a different next action
817  * (i.e., backtracking point)
818  * @return True, if this action is now the most recent backtracking point;
819  * false otherwise
820  */
821 bool ModelChecker::set_latest_backtrack(ModelAction *act)
822 {
823         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
824                 priv->next_backtrack = act;
825                 return true;
826         }
827         return false;
828 }
829
830 /**
831  * Returns last backtracking point. The model checker will explore a different
832  * path for this point in the next execution.
833  * @return The ModelAction at which the next execution should diverge.
834  */
835 ModelAction * ModelChecker::get_next_backtrack()
836 {
837         ModelAction *next = priv->next_backtrack;
838         priv->next_backtrack = NULL;
839         return next;
840 }
841
842 /**
843  * Processes a read or RMW model action.
844  * @param curr is the read model action to process.
845  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
846  * @return True if processing this read updates the mo_graph.
847  */
848 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
849 {
850         uint64_t value = VALUE_NONE;
851         bool updated = false;
852         while (true) {
853                 const ModelAction *reads_from = curr->get_node()->get_read_from();
854                 if (reads_from != NULL) {
855                         mo_graph->startChanges();
856
857                         value = reads_from->get_value();
858                         bool r_status = false;
859
860                         if (!second_part_of_rmw) {
861                                 check_recency(curr, reads_from);
862                                 r_status = r_modification_order(curr, reads_from);
863                         }
864
865                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
866                                 mo_graph->rollbackChanges();
867                                 priv->too_many_reads = false;
868                                 continue;
869                         }
870
871                         read_from(curr, reads_from);
872                         mo_graph->commitChanges();
873                         mo_check_promises(curr, true);
874
875                         updated |= r_status;
876                 } else if (!second_part_of_rmw) {
877                         /* Read from future value */
878                         struct future_value fv = curr->get_node()->get_future_value();
879                         Promise *promise = new Promise(curr, fv);
880                         value = fv.value;
881                         curr->set_read_from_promise(promise);
882                         promises->push_back(promise);
883                         mo_graph->startChanges();
884                         updated = r_modification_order(curr, promise);
885                         mo_graph->commitChanges();
886                 }
887                 get_thread(curr)->set_return_value(value);
888                 return updated;
889         }
890 }
891
892 /**
893  * Processes a lock, trylock, unlock, wait, or notify model action.
894  * @param curr is the mutex operation to process.
895  *
896  * The trylock operation checks whether the lock is taken.  If not,
897  * it falls through to the normal lock operation case.  If so, it
898  * returns failure.
899  *
900  * The lock operation has already been checked that it is enabled, so
901  * it just grabs the lock and synchronizes with the previous unlock.
902  *
903  * The unlock operation has to re-enable all of the threads that are
904  * waiting on the lock.
905  *
906  * @return True if synchronization was updated; false otherwise
907  */
908 bool ModelChecker::process_mutex(ModelAction *curr)
909 {
910         std::mutex *mutex = NULL;
911         struct std::mutex_state *state = NULL;
912
913         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
914                 mutex = (std::mutex *)curr->get_location();
915                 state = mutex->get_state();
916         } else if (curr->is_wait()) {
917                 mutex = (std::mutex *)curr->get_value();
918                 state = mutex->get_state();
919         }
920
921         switch (curr->get_type()) {
922         case ATOMIC_TRYLOCK: {
923                 bool success = !state->islocked;
924                 curr->set_try_lock(success);
925                 if (!success) {
926                         get_thread(curr)->set_return_value(0);
927                         break;
928                 }
929                 get_thread(curr)->set_return_value(1);
930         }
931                 //otherwise fall through to the lock case
932         case ATOMIC_LOCK: {
933                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
934                         assert_bug("Lock access before initialization");
935                 state->islocked = true;
936                 ModelAction *unlock = get_last_unlock(curr);
937                 //synchronize with the previous unlock statement
938                 if (unlock != NULL) {
939                         curr->synchronize_with(unlock);
940                         return true;
941                 }
942                 break;
943         }
944         case ATOMIC_UNLOCK: {
945                 //unlock the lock
946                 state->islocked = false;
947                 //wake up the other threads
948                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
949                 //activate all the waiting threads
950                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
951                         scheduler->wake(get_thread(*rit));
952                 }
953                 waiters->clear();
954                 break;
955         }
956         case ATOMIC_WAIT: {
957                 //unlock the lock
958                 state->islocked = false;
959                 //wake up the other threads
960                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
961                 //activate all the waiting threads
962                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
963                         scheduler->wake(get_thread(*rit));
964                 }
965                 waiters->clear();
966                 //check whether we should go to sleep or not...simulate spurious failures
967                 if (curr->get_node()->get_misc() == 0) {
968                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
969                         //disable us
970                         scheduler->sleep(get_thread(curr));
971                 }
972                 break;
973         }
974         case ATOMIC_NOTIFY_ALL: {
975                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
976                 //activate all the waiting threads
977                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
978                         scheduler->wake(get_thread(*rit));
979                 }
980                 waiters->clear();
981                 break;
982         }
983         case ATOMIC_NOTIFY_ONE: {
984                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
985                 int wakeupthread = curr->get_node()->get_misc();
986                 action_list_t::iterator it = waiters->begin();
987                 advance(it, wakeupthread);
988                 scheduler->wake(get_thread(*it));
989                 waiters->erase(it);
990                 break;
991         }
992
993         default:
994                 ASSERT(0);
995         }
996         return false;
997 }
998
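/*
 * Editorial note, illustrative only: a user program exercising the lock,
 * unlock, wait, and notify cases above. Note that a wait action carries its
 * mutex in the value field, which is why process_mutex() fetches the mutex
 * via get_value() for ATOMIC_WAIT. The condition-variable interface is
 * sketched loosely here (cv.wait(m) taking the mutex directly is an
 * assumption of this sketch, not necessarily the modeled API).
 *
 *     std::mutex m;
 *     std::condition_variable cv;
 *     int data = 0;
 *
 *     void producer() { m.lock(); data = 1; cv.notify_one(); m.unlock(); }
 *     void consumer() { m.lock(); if (!data) cv.wait(m); m.unlock(); }
 */
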
999 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1000 {
1001         /* Do more ambitious checks now that mo is more complete */
1002         if (mo_may_allow(writer, reader)) {
1003                 Node *node = reader->get_node();
1004
1005                 /* Find an ancestor thread which exists at the time of the reader */
1006                 Thread *write_thread = get_thread(writer);
1007                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1008                         write_thread = write_thread->get_parent();
1009
1010                 struct future_value fv = {
1011                         writer->get_value(),
1012                         writer->get_seq_number() + params.maxfuturedelay,
1013                         write_thread->get_id(),
1014                 };
1015                 if (node->add_future_value(fv))
1016                         set_latest_backtrack(reader);
1017         }
1018 }
1019
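/*
 * Editorial note, illustrative only: why future values exist at all. In the
 * classic load-buffering test below, the outcome r1 == r2 == 1 is allowed for
 * relaxed atomics, yet each load must then read from a store that has not
 * been executed when the load is processed. The checker records such
 * candidates as pending future values (bounded by params.maxfuturedelay) and
 * later checks that some write actually supplies them.
 *
 *     std::atomic<int> x(0), y(0);
 *     // Thread 1:  r1 = x.load(std::memory_order_relaxed);  y.store(1, std::memory_order_relaxed);
 *     // Thread 2:  r2 = y.load(std::memory_order_relaxed);  x.store(1, std::memory_order_relaxed);
 */
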
1020 /**
1021  * Process a write ModelAction
1022  * @param curr The ModelAction to process
1023  * @return True if the mo_graph was updated or promises were resolved
1024  */
1025 bool ModelChecker::process_write(ModelAction *curr)
1026 {
1027         bool updated_mod_order = w_modification_order(curr);
1028         bool updated_promises = resolve_promises(curr);
1029
1030         if (promises->size() == 0) {
1031                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1032                         struct PendingFutureValue pfv = (*futurevalues)[i];
1033                         add_future_value(pfv.writer, pfv.act);
1034                 }
1035                 futurevalues->clear();
1036         }
1037
1038         mo_graph->commitChanges();
1039         mo_check_promises(curr, false);
1040
1041         get_thread(curr)->set_return_value(VALUE_NONE);
1042         return updated_mod_order || updated_promises;
1043 }
1044
1045 /**
1046  * Process a fence ModelAction
1047  * @param curr The ModelAction to process
1048  * @return True if synchronization was updated
1049  */
1050 bool ModelChecker::process_fence(ModelAction *curr)
1051 {
1052         /*
1053          * fence-relaxed: no-op
1054  * fence-release: only log the occurrence (not in this function), for
1055          *   use in later synchronization
1056          * fence-acquire (this function): search for hypothetical release
1057          *   sequences
1058          */
1059         bool updated = false;
1060         if (curr->is_acquire()) {
1061                 action_list_t *list = action_trace;
1062                 action_list_t::reverse_iterator rit;
1063                 /* Find X : is_read(X) && X --sb-> curr */
1064                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1065                         ModelAction *act = *rit;
1066                         if (act == curr)
1067                                 continue;
1068                         if (act->get_tid() != curr->get_tid())
1069                                 continue;
1070                         /* Stop at the beginning of the thread */
1071                         if (act->is_thread_start())
1072                                 break;
1073                         /* Stop once we reach a prior fence-acquire */
1074                         if (act->is_fence() && act->is_acquire())
1075                                 break;
1076                         if (!act->is_read())
1077                                 continue;
1078                         /* read-acquire will find its own release sequences */
1079                         if (act->is_acquire())
1080                                 continue;
1081
1082                         /* Establish hypothetical release sequences */
1083                         rel_heads_list_t release_heads;
1084                         get_release_seq_heads(curr, act, &release_heads);
1085                         for (unsigned int i = 0; i < release_heads.size(); i++)
1086                                 if (!curr->synchronize_with(release_heads[i]))
1087                                         set_bad_synchronization();
1088                         if (release_heads.size() != 0)
1089                                 updated = true;
1090                 }
1091         }
1092         return updated;
1093 }
1094
1095 /**
1096  * @brief Process the current action for thread-related activity
1097  *
1098  * Performs current-action processing for a THREAD_* ModelAction. Processing
1099  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1100  * synchronization, etc.  This function is a no-op for non-THREAD actions
1101  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1102  *
1103  * @param curr The current action
1104  * @return True if synchronization was updated or a thread completed
1105  */
1106 bool ModelChecker::process_thread_action(ModelAction *curr)
1107 {
1108         bool updated = false;
1109
1110         switch (curr->get_type()) {
1111         case THREAD_CREATE: {
1112                 thrd_t *thrd = (thrd_t *)curr->get_location();
1113                 struct thread_params *params = (struct thread_params *)curr->get_value();
1114                 Thread *th = new Thread(thrd, params->func, params->arg);
1115                 add_thread(th);
1116                 th->set_creation(curr);
1117                 /* Promises can be satisfied by children */
1118                 for (unsigned int i = 0; i < promises->size(); i++) {
1119                         Promise *promise = (*promises)[i];
1120                         if (promise->thread_is_available(curr->get_tid()))
1121                                 promise->add_thread(th->get_id());
1122                 }
1123                 break;
1124         }
1125         case THREAD_JOIN: {
1126                 Thread *blocking = curr->get_thread_operand();
1127                 ModelAction *act = get_last_action(blocking->get_id());
1128                 curr->synchronize_with(act);
1129                 updated = true; /* trigger rel-seq checks */
1130                 break;
1131         }
1132         case THREAD_FINISH: {
1133                 Thread *th = get_thread(curr);
1134                 while (!th->wait_list_empty()) {
1135                         ModelAction *act = th->pop_wait_list();
1136                         scheduler->wake(get_thread(act));
1137                 }
1138                 th->complete();
1139                 /* Completed thread can't satisfy promises */
1140                 for (unsigned int i = 0; i < promises->size(); i++) {
1141                         Promise *promise = (*promises)[i];
1142                         if (promise->thread_is_available(th->get_id()))
1143                                 if (promise->eliminate_thread(th->get_id()))
1144                                         priv->failed_promise = true;
1145                 }
1146                 updated = true; /* trigger rel-seq checks */
1147                 break;
1148         }
1149         case THREAD_START: {
1150                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1151                 break;
1152         }
1153         default:
1154                 break;
1155         }
1156
1157         return updated;
1158 }
1159
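/*
 * Editorial note, illustrative only: the THREAD_CREATE / THREAD_JOIN /
 * THREAD_FINISH cases above correspond to a user program like the following,
 * assuming the C11-style <threads.h> interface suggested by the thrd_t and
 * thread_params usage above (the exact signatures and the user_main() entry
 * point are assumptions of this sketch). The join synchronizes with the
 * child's last action, so the child's work happens-before the code after
 * thrd_join().
 *
 *     void child(void *arg) { }        // child work elided
 *
 *     int user_main(int argc, char **argv) {
 *         thrd_t t;
 *         thrd_create(&t, (thrd_start_t)&child, NULL);
 *         thrd_join(t);
 *         return 0;
 *     }
 */
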
1160 /**
1161  * @brief Process the current action for release sequence fixup activity
1162  *
1163  * Performs model-checker release sequence fixups for the current action,
1164  * forcing a single pending release sequence to break (with a given, potential
1165  * "loose" write) or to complete (i.e., synchronize). If a pending release
1166  * sequence forms a complete release sequence, then we must perform the fixup
1167  * synchronization, mo_graph additions, etc.
1168  *
1169  * @param curr The current action; must be a release sequence fixup action
1170  * @param work_queue The work queue to which to add work items as they are
1171  * generated
1172  */
1173 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1174 {
1175         const ModelAction *write = curr->get_node()->get_relseq_break();
1176         struct release_seq *sequence = pending_rel_seqs->back();
1177         pending_rel_seqs->pop_back();
1178         ASSERT(sequence);
1179         ModelAction *acquire = sequence->acquire;
1180         const ModelAction *rf = sequence->rf;
1181         const ModelAction *release = sequence->release;
1182         ASSERT(acquire);
1183         ASSERT(release);
1184         ASSERT(rf);
1185         ASSERT(release->same_thread(rf));
1186
1187         if (write == NULL) {
1188                 /**
1189                  * @todo Forcing a synchronization requires that we set
1190                  * modification order constraints. For instance, we can't allow
1191                  * a fixup sequence in which two separate read-acquire
1192                  * operations read from the same sequence, where the first one
1193                  * synchronizes and the other doesn't. Essentially, we can't
1194                  * allow any writes to insert themselves between 'release' and
1195                  * 'rf'
1196                  */
1197
1198                 /* Must synchronize */
1199                 if (!acquire->synchronize_with(release)) {
1200                         set_bad_synchronization();
1201                         return;
1202                 }
1203                 /* Re-check all pending release sequences */
1204                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1205                 /* Re-check act for mo_graph edges */
1206                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1207
1208                 /* propagate synchronization to later actions */
1209                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1210                 for (; (*rit) != acquire; rit++) {
1211                         ModelAction *propagate = *rit;
1212                         if (acquire->happens_before(propagate)) {
1213                                 propagate->synchronize_with(acquire);
1214                                 /* Re-check 'propagate' for mo_graph edges */
1215                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1216                         }
1217                 }
1218         } else {
1219                 /* Break release sequence with new edges:
1220                  *   release --mo--> write --mo--> rf */
1221                 mo_graph->addEdge(release, write);
1222                 mo_graph->addEdge(write, rf);
1223         }
1224
1225         /* See if we have realized a data race */
1226         checkDataRaces();
1227 }
1228
1229 /**
1230  * Initialize the current action by performing one or more of the following
1231  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1232  * in the NodeStack, manipulating backtracking sets, allocating and
1233  * initializing clock vectors, and computing the promises to fulfill.
1234  *
1235  * @param curr The current action, as passed from the user context; may be
1236  * freed/invalidated after the execution of this function, with a different
1237  * action "returned" in its place (pass-by-reference)
1238  * @return True if curr is a newly-explored action; false otherwise
1239  */
1240 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1241 {
1242         ModelAction *newcurr;
1243
1244         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1245                 newcurr = process_rmw(*curr);
1246                 delete *curr;
1247
1248                 if (newcurr->is_rmw())
1249                         compute_promises(newcurr);
1250
1251                 *curr = newcurr;
1252                 return false;
1253         }
1254
1255         (*curr)->set_seq_number(get_next_seq_num());
1256
1257         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1258         if (newcurr) {
1259                 /* First restore type and order in case of RMW operation */
1260                 if ((*curr)->is_rmwr())
1261                         newcurr->copy_typeandorder(*curr);
1262
1263                 ASSERT((*curr)->get_location() == newcurr->get_location());
1264                 newcurr->copy_from_new(*curr);
1265
1266                 /* Discard duplicate ModelAction; use action from NodeStack */
1267                 delete *curr;
1268
1269                 /* Always compute new clock vector */
1270                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1271
1272                 *curr = newcurr;
1273                 return false; /* Action was explored previously */
1274         } else {
1275                 newcurr = *curr;
1276
1277                 /* Always compute new clock vector */
1278                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1279
1280                 /* Assign most recent release fence */
1281                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1282
1283                 /*
1284                  * Perform one-time actions when pushing new ModelAction onto
1285                  * NodeStack
1286                  */
1287                 if (newcurr->is_write())
1288                         compute_promises(newcurr);
1289                 else if (newcurr->is_relseq_fixup())
1290                         compute_relseq_breakwrites(newcurr);
1291                 else if (newcurr->is_wait())
1292                         newcurr->get_node()->set_misc_max(2);
1293                 else if (newcurr->is_notify_one()) {
1294                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1295                 }
1296                 return true; /* This was a new ModelAction */
1297         }
1298 }
1299
1300 /**
1301  * @brief Establish reads-from relation between two actions
1302  *
1303  * Perform basic operations involved with establishing a concrete rf relation,
1304  * including setting the ModelAction data and checking for release sequences.
1305  *
1306  * @param act The action that is reading (must be a read)
1307  * @param rf The action from which we are reading (must be a write)
1308  *
1309  * @return True if this read established synchronization
1310  */
1311 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1312 {
1313         act->set_read_from(rf);
1314         if (rf != NULL && act->is_acquire()) {
1315                 rel_heads_list_t release_heads;
1316                 get_release_seq_heads(act, act, &release_heads);
1317                 int num_heads = release_heads.size();
1318                 for (unsigned int i = 0; i < release_heads.size(); i++)
1319                         if (!act->synchronize_with(release_heads[i])) {
1320                                 set_bad_synchronization();
1321                                 num_heads--;
1322                         }
1323                 return num_heads > 0;
1324         }
1325         return false;
1326 }
1327
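/*
 * Editorial note, illustrative only: a release sequence read through an
 * intervening RMW. Thread 2's fetch_add continues the release sequence headed
 * by thread 1's store-release, so when thread 3's load-acquire reads from the
 * RMW, it must still synchronize with the release head returned by
 * get_release_seq_heads().
 *
 *     std::atomic<int> x(0);
 *     // Thread 1:  x.store(1, std::memory_order_release);
 *     // Thread 2:  x.fetch_add(1, std::memory_order_relaxed);   // continues the sequence
 *     // Thread 3:  int r = x.load(std::memory_order_acquire);   // may read 2, syncs with T1
 */
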
1328 /**
1329  * @brief Check whether a model action is enabled.
1330  *
1331  * Checks whether a lock or join operation would be successful (i.e., is the
1332  * lock already locked, or is the joined thread already complete). If not, put
1333  * the action in a waiter list.
1334  *
1335  * @param curr is the ModelAction to check whether it is enabled.
1336  * @return a bool that indicates whether the action is enabled.
1337  */
1338 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1339         if (curr->is_lock()) {
1340                 std::mutex *lock = (std::mutex *)curr->get_location();
1341                 struct std::mutex_state *state = lock->get_state();
1342                 if (state->islocked) {
1343                         //Stick the action in the appropriate waiting queue
1344                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1345                         return false;
1346                 }
1347         } else if (curr->get_type() == THREAD_JOIN) {
1348                 Thread *blocking = (Thread *)curr->get_location();
1349                 if (!blocking->is_complete()) {
1350                         blocking->push_wait_list(curr);
1351                         return false;
1352                 }
1353         }
1354
1355         return true;
1356 }
1357
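/*
 * Sketch (standalone, hypothetical names): the pattern above is "test the
 * blocking condition; if blocked, park the action on a per-object waiter
 * list and report it as disabled; otherwise let it run."
 *
 *   #include <map>
 *   #include <list>
 *
 *   // object -> ids of actions waiting on that object
 *   static std::map<const void *, std::list<int> > waiters;
 *
 *   static bool example_check_enabled(const void *obj, int action_id, bool blocked)
 *   {
 *           if (blocked) {
 *                   waiters[obj].push_back(action_id);  // woken when obj is released
 *                   return false;
 *           }
 *           return true;
 *   }
 */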
1358 /**
1359  * This is the heart of the model checker routine. It performs model-checking
1360  * actions corresponding to a given "current action." Among other tasks, it
1361  * calculates reads-from relationships, updates synchronization clock vectors,
1362  * forms a memory_order constraints graph, and handles replay/backtrack
1363  * execution when running permutations of previously-observed executions.
1364  *
1365  * @param curr The current action to process
1366  * @return The ModelAction that is actually executed; may be different than
1367  * curr; may be NULL, if the current action is not enabled to run
1368  */
1369 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1370 {
1371         ASSERT(curr);
1372         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1373
1374         if (!check_action_enabled(curr)) {
1375                 /* Make the execution look like we chose to run this action
1376                  * much later, when a lock/join can succeed */
1377                 get_thread(curr)->set_pending(curr);
1378                 scheduler->sleep(get_thread(curr));
1379                 return NULL;
1380         }
1381
1382         bool newly_explored = initialize_curr_action(&curr);
1383
1384         DBG();
1385         if (DBG_ENABLED())
1386                 curr->print();
1387
1388         wake_up_sleeping_actions(curr);
1389
1390         /* Add the action to lists before any other model-checking tasks */
1391         if (!second_part_of_rmw)
1392                 add_action_to_lists(curr);
1393
1394         /* Build may_read_from set for newly-created actions */
1395         if (newly_explored && curr->is_read())
1396                 build_reads_from_past(curr);
1397
1398         /* Initialize work_queue with the "current action" work */
1399         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1400         while (!work_queue.empty() && !has_asserted()) {
1401                 WorkQueueEntry work = work_queue.front();
1402                 work_queue.pop_front();
1403
1404                 switch (work.type) {
1405                 case WORK_CHECK_CURR_ACTION: {
1406                         ModelAction *act = work.action;
1407                         bool update = false; /* update this location's release seq's */
1408                         bool update_all = false; /* update all release seq's */
1409
1410                         if (process_thread_action(curr))
1411                                 update_all = true;
1412
1413                         if (act->is_read() && process_read(act, second_part_of_rmw))
1414                                 update = true;
1415
1416                         if (act->is_write() && process_write(act))
1417                                 update = true;
1418
1419                         if (act->is_fence() && process_fence(act))
1420                                 update_all = true;
1421
1422                         if (act->is_mutex_op() && process_mutex(act))
1423                                 update_all = true;
1424
1425                         if (act->is_relseq_fixup())
1426                                 process_relseq_fixup(curr, &work_queue);
1427
1428                         if (update_all)
1429                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1430                         else if (update)
1431                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1432                         break;
1433                 }
1434                 case WORK_CHECK_RELEASE_SEQ:
1435                         resolve_release_sequences(work.location, &work_queue);
1436                         break;
1437                 case WORK_CHECK_MO_EDGES: {
1438                         /** @todo Complete verification of work_queue */
1439                         ModelAction *act = work.action;
1440                         bool updated = false;
1441
1442                         if (act->is_read()) {
1443                                 const ModelAction *rf = act->get_reads_from();
1444                                 const Promise *promise = act->get_reads_from_promise();
1445                                 if (rf) {
1446                                         if (r_modification_order(act, rf))
1447                                                 updated = true;
1448                                 } else if (promise) {
1449                                         if (r_modification_order(act, promise))
1450                                                 updated = true;
1451                                 }
1452                         }
1453                         if (act->is_write()) {
1454                                 if (w_modification_order(act))
1455                                         updated = true;
1456                         }
1457                         mo_graph->commitChanges();
1458
1459                         if (updated)
1460                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1461                         break;
1462                 }
1463                 default:
1464                         ASSERT(false);
1465                         break;
1466                 }
1467         }
1468
1469         check_curr_backtracking(curr);
1470         set_backtracking(curr);
1471         return curr;
1472 }
1473
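/*
 * Sketch (standalone, hypothetical): the loop above is a work-queue fixed
 * point -- pop one entry, process it, and enqueue any follow-up work it
 * generates, until the queue drains.
 *
 *   #include <deque>
 *
 *   template <typename Item, typename Process>
 *   static void run_to_fixpoint(std::deque<Item> work, Process process)
 *   {
 *           while (!work.empty()) {
 *                   Item item = work.front();
 *                   work.pop_front();
 *                   process(item, &work);  // process() may push_back() new items
 *           }
 *   }
 */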
1474 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1475 {
1476         Node *currnode = curr->get_node();
1477         Node *parnode = currnode->get_parent();
1478
1479         if ((parnode && !parnode->backtrack_empty()) ||
1480                          !currnode->misc_empty() ||
1481                          !currnode->read_from_empty() ||
1482                          !currnode->future_value_empty() ||
1483                          !currnode->promise_empty() ||
1484                          !currnode->relseq_break_empty()) {
1485                 set_latest_backtrack(curr);
1486         }
1487 }
1488
1489 bool ModelChecker::promises_expired() const
1490 {
1491         for (unsigned int i = 0; i < promises->size(); i++) {
1492                 Promise *promise = (*promises)[i];
1493                 if (promise->get_expiration() < priv->used_sequence_numbers)
1494                         return true;
1495         }
1496         return false;
1497 }
1498
1499 /**
1500  * This is the strongest feasibility check available.
1501  * @return whether the current trace (partial or complete) must be a prefix of
1502  * a feasible trace.
1503  */
1504 bool ModelChecker::isfeasibleprefix() const
1505 {
1506         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1507 }
1508
1509 /**
1510  * Print diagnostic information about an infeasible execution
1511  * @param prefix A string to prefix the output with; if NULL, then a default
1512  * message prefix will be provided
1513  */
1514 void ModelChecker::print_infeasibility(const char *prefix) const
1515 {
1516         char buf[100];
1517         char *ptr = buf;
1518         if (mo_graph->checkForCycles())
1519                 ptr += sprintf(ptr, "[mo cycle]");
1520         if (priv->failed_promise)
1521                 ptr += sprintf(ptr, "[failed promise]");
1522         if (priv->too_many_reads)
1523                 ptr += sprintf(ptr, "[too many reads]");
1524         if (priv->no_valid_reads)
1525                 ptr += sprintf(ptr, "[no valid reads-from]");
1526         if (priv->bad_synchronization)
1527                 ptr += sprintf(ptr, "[bad sw ordering]");
1528         if (promises_expired())
1529                 ptr += sprintf(ptr, "[promise expired]");
1530         if (promises->size() != 0)
1531                 ptr += sprintf(ptr, "[unresolved promise]");
1532         if (ptr != buf)
1533                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1534 }
1535
1536 /**
1537  * Returns whether the current completed trace is feasible, except for pending
1538  * release sequences.
1539  */
1540 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1541 {
1542         return !is_infeasible() && promises->size() == 0;
1543 }
1544
1545 /**
1546  * Check if the current partial trace is infeasible. Does not check any
1547  * end-of-execution flags, which might rule out the execution; thus, this
1548  * check can only show that an execution is infeasible, never that it is feasible.
1549  * @return whether the current partial trace is infeasible.
1550  */
1551 bool ModelChecker::is_infeasible() const
1552 {
1553         return mo_graph->checkForCycles() ||
1554                 priv->no_valid_reads ||
1555                 priv->failed_promise ||
1556                 priv->too_many_reads ||
1557                 priv->bad_synchronization ||
1558                 promises_expired();
1559 }
1560
1561 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1562 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1563         ModelAction *lastread = get_last_action(act->get_tid());
1564         lastread->process_rmw(act);
1565         if (act->is_rmw()) {
1566                 if (lastread->get_reads_from())
1567                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1568                 else
1569                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1570                 mo_graph->commitChanges();
1571         }
1572         return lastread;
1573 }
1574
1575 /**
1576  * Checks whether a thread has read from the same write too many times
1577  * without seeing the effects of a later write.
1578  *
1579  * Basic idea:
1580  * 1) there must be a different write that we could read from that would satisfy the modification order,
1581  * 2) we must have read from the same write at least maxreads times, and
1582  * 3) that other write must have been in the reads_from set for each of those reads.
1583  *
1584  * If so, we decide that the execution is no longer feasible.
1585  */
1586 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1587 {
1588         if (params.maxreads != 0) {
1589                 if (curr->get_node()->get_read_from_size() <= 1)
1590                         return;
1591                 //Must make sure that execution is currently feasible...  We could
1592                 //accidentally clear by rolling back
1593                 if (is_infeasible())
1594                         return;
1595                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1596                 int tid = id_to_int(curr->get_tid());
1597
1598                 /* Skip checks: this thread has no actions at this location yet */
1599                 if ((int)thrd_lists->size() <= tid)
1600                         return;
1601                 action_list_t *list = &(*thrd_lists)[tid];
1602
1603                 action_list_t::reverse_iterator rit = list->rbegin();
1604                 /* Find curr in this thread's list */
1605                 for (; (*rit) != curr; rit++)
1606                         ;
1607                 /* go past curr now */
1608                 rit++;
1609
1610                 action_list_t::reverse_iterator ritcopy = rit;
1611                 //See if we have enough reads from the same value
1612                 int count = 0;
1613                 for (; count < params.maxreads; rit++, count++) {
1614                         if (rit == list->rend())
1615                                 return;
1616                         ModelAction *act = *rit;
1617                         if (!act->is_read())
1618                                 return;
1619
1620                         if (act->get_reads_from() != rf)
1621                                 return;
1622                         if (act->get_node()->get_read_from_size() <= 1)
1623                                 return;
1624                 }
1625                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1626                         /* Get write */
1627                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1628
1629                         /* Need a different write */
1630                         if (write == rf)
1631                                 continue;
1632
1633                         /* Test to see whether this is a feasible write to read from */
1634                         /** NOTE: all members of read-from set should be
1635                          *  feasible, so we no longer check it here **/
1636
1637                         rit = ritcopy;
1638
1639                         bool feasiblewrite = true;
1640                         //now we need to see if this write works for everyone
1641
1642                         for (int loop = count; loop > 0; loop--, rit++) {
1643                                 ModelAction *act = *rit;
1644                                 bool foundvalue = false;
1645                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1646                                         if (act->get_node()->get_read_from_at(j) == write) {
1647                                                 foundvalue = true;
1648                                                 break;
1649                                         }
1650                                 }
1651                                 if (!foundvalue) {
1652                                         feasiblewrite = false;
1653                                         break;
1654                                 }
1655                         }
1656                         if (feasiblewrite) {
1657                                 priv->too_many_reads = true;
1658                                 return;
1659                         }
1660                 }
1661         }
1662 }
1663
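/*
 * Example (illustrative client code): the too-many-reads check targets
 * unbounded spin loops that keep reading the same stale store even though a
 * newer store could be read, e.g.:
 *
 *   #include <atomic>
 *
 *   std::atomic<int> ready(0);
 *
 *   void spinner() {
 *           // With params.maxreads set, reading 0 from the initial store more
 *           // than maxreads times while the store of 1 is also readable marks
 *           // the execution infeasible rather than exploring it forever.
 *           while (ready.load(std::memory_order_relaxed) == 0)
 *                   ;
 *   }
 *
 *   void setter() {
 *           ready.store(1, std::memory_order_relaxed);
 *   }
 */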
1664 /**
1665  * Updates the mo_graph with the constraints imposed from the current
1666  * read.
1667  *
1668  * Basic idea is the following: Go through each other thread and find
1669  * the last action that happened before our read.  Two cases:
1670  *
1671  * (1) The action is a write => that write must either occur before
1672  * the write we read from or be the write we read from.
1673  *
1674  * (2) The action is a read => the write that that action read from
1675  * must occur before the write we read from or be the same write.
1676  *
1677  * @param curr The current action. Must be a read.
1678  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1679  * @return True if modification order edges were added; false otherwise
1680  */
1681 template <typename rf_type>
1682 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1683 {
1684         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1685         unsigned int i;
1686         bool added = false;
1687         ASSERT(curr->is_read());
1688
1689         /* Last SC fence in the current thread */
1690         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1691
1692         /* Iterate over all threads */
1693         for (i = 0; i < thrd_lists->size(); i++) {
1694                 /* Last SC fence in thread i */
1695                 ModelAction *last_sc_fence_thread_local = NULL;
1696                 if (int_to_id((int)i) != curr->get_tid())
1697                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1698
1699                 /* Last SC fence in thread i, before last SC fence in current thread */
1700                 ModelAction *last_sc_fence_thread_before = NULL;
1701                 if (last_sc_fence_local)
1702                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1703
1704                 /* Iterate over actions in thread, starting from most recent */
1705                 action_list_t *list = &(*thrd_lists)[i];
1706                 action_list_t::reverse_iterator rit;
1707                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1708                         ModelAction *act = *rit;
1709
1710                         if (act->is_write() && !act->equals(rf) && act != curr) {
1711                                 /* C++, Section 29.3 statement 5 */
1712                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1713                                                 *act < *last_sc_fence_thread_local) {
1714                                         added = mo_graph->addEdge(act, rf) || added;
1715                                         break;
1716                                 }
1717                                 /* C++, Section 29.3 statement 4 */
1718                                 else if (act->is_seqcst() && last_sc_fence_local &&
1719                                                 *act < *last_sc_fence_local) {
1720                                         added = mo_graph->addEdge(act, rf) || added;
1721                                         break;
1722                                 }
1723                                 /* C++, Section 29.3 statement 6 */
1724                                 else if (last_sc_fence_thread_before &&
1725                                                 *act < *last_sc_fence_thread_before) {
1726                                         added = mo_graph->addEdge(act, rf) || added;
1727                                         break;
1728                                 }
1729                         }
1730
1731                         /*
1732                          * Include at most one act per-thread that "happens
1733                          * before" curr. Don't consider reflexively.
1734                          */
1735                         if (act->happens_before(curr) && act != curr) {
1736                                 if (act->is_write()) {
1737                                         if (!act->equals(rf)) {
1738                                                 added = mo_graph->addEdge(act, rf) || added;
1739                                         }
1740                                 } else {
1741                                         const ModelAction *prevreadfrom = act->get_reads_from();
1742                                         //if the previous read is unresolved, keep going...
1743                                         if (prevreadfrom == NULL)
1744                                                 continue;
1745
1746                                         if (!prevreadfrom->equals(rf)) {
1747                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1748                                         }
1749                                 }
1750                                 break;
1751                         }
1752                 }
1753         }
1754
1755         /*
1756          * All compatible, thread-exclusive promises must be ordered after any
1757          * concrete loads from the same thread
1758          */
1759         for (unsigned int i = 0; i < promises->size(); i++)
1760                 if ((*promises)[i]->is_compatible_exclusive(curr))
1761                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1762
1763         return added;
1764 }
1765
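/*
 * Example (illustrative): case (1) above in a minimal client shape -- a store
 * A that happens before a read r forces A to be mo-before the store that r
 * reads from (unless they are the same store):
 *
 *   #include <atomic>
 *
 *   std::atomic<int> x(0);
 *
 *   void t1() {
 *           x.store(1, std::memory_order_relaxed);       // store A
 *           int v = x.load(std::memory_order_relaxed);   // read r (A happens before r)
 *           // If r reads 2 from t2's store B, the code above adds A --mo--> B.
 *           (void)v;
 *   }
 *
 *   void t2() {
 *           x.store(2, std::memory_order_relaxed);       // store B
 *   }
 */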
1766 /**
1767  * Updates the mo_graph with the constraints imposed from the current write.
1768  *
1769  * Basic idea is the following: Go through each other thread and find
1770  * the latest action that happened before our write.  Two cases:
1771  *
1772  * (1) The action is a write => that write must occur before
1773  * the current write
1774  *
1775  * (2) The action is a read => the write that that action read from
1776  * must occur before the current write.
1777  *
1778  * This method also handles two other issues:
1779  *
1780  * (I) Sequential Consistency: Making sure that if the current write is
1781  * seq_cst, that it occurs after the previous seq_cst write.
1782  *
1783  * (II) Sending the write back to non-synchronizing reads.
1784  *
1785  * @param curr The current action. Must be a write.
1786  * @return True if modification order edges were added; false otherwise
1787  */
1788 bool ModelChecker::w_modification_order(ModelAction *curr)
1789 {
1790         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1791         unsigned int i;
1792         bool added = false;
1793         ASSERT(curr->is_write());
1794
1795         if (curr->is_seqcst()) {
1796                 /* We have to at least see the last sequentially consistent write,
1797                          so we are initialized. */
1798                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1799                 if (last_seq_cst != NULL) {
1800                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1801                 }
1802         }
1803
1804         /* Last SC fence in the current thread */
1805         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1806
1807         /* Iterate over all threads */
1808         for (i = 0; i < thrd_lists->size(); i++) {
1809                 /* Last SC fence in thread i, before last SC fence in current thread */
1810                 ModelAction *last_sc_fence_thread_before = NULL;
1811                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1812                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1813
1814                 /* Iterate over actions in thread, starting from most recent */
1815                 action_list_t *list = &(*thrd_lists)[i];
1816                 action_list_t::reverse_iterator rit;
1817                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1818                         ModelAction *act = *rit;
1819                         if (act == curr) {
1820                                 /*
1821                                  * 1) If RMW and it actually read from something, then we
1822                                  * already have all relevant edges, so just skip to next
1823                                  * thread.
1824                                  *
1825                                  * 2) If RMW and it didn't read from anything, we should
1826                                  * take whatever edge we can get to speed up convergence.
1827                                  *
1828                                  * 3) If normal write, we need to look at earlier actions, so
1829                                  * continue processing list.
1830                                  */
1831                                 if (curr->is_rmw()) {
1832                                         if (curr->get_reads_from() != NULL)
1833                                                 break;
1834                                         else
1835                                                 continue;
1836                                 } else
1837                                         continue;
1838                         }
1839
1840                         /* C++, Section 29.3 statement 7 */
1841                         if (last_sc_fence_thread_before && act->is_write() &&
1842                                         *act < *last_sc_fence_thread_before) {
1843                                 added = mo_graph->addEdge(act, curr) || added;
1844                                 break;
1845                         }
1846
1847                         /*
1848                          * Include at most one act per-thread that "happens
1849                          * before" curr
1850                          */
1851                         if (act->happens_before(curr)) {
1852                                 /*
1853                                  * Note: if act is RMW, just add edge:
1854                                  *   act --mo--> curr
1855                                  * The following edge should be handled elsewhere:
1856                                  *   readfrom(act) --mo--> act
1857                                  */
1858                                 if (act->is_write())
1859                                         added = mo_graph->addEdge(act, curr) || added;
1860                                 else if (act->is_read()) {
1861                                         //if the previous read's reads-from is still unresolved (NULL), just keep going
1862                                         if (act->get_reads_from() == NULL)
1863                                                 continue;
1864                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1865                                 }
1866                                 break;
1867                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1868                                                      !act->same_thread(curr)) {
1869                                 /* We have an action that:
1870                                    (1) did not happen before us
1871                                    (2) is a read and we are a write
1872                                    (3) cannot synchronize with us
1873                                    (4) is in a different thread
1874                                    =>
1875                                    that read could potentially read from our write.  Note that
1876                                    these checks are overly conservative at this point, we'll
1877                                    do more checks before actually removing the
1878                                    pendingfuturevalue.
1879
1880                                  */
1881                                 if (thin_air_constraint_may_allow(curr, act)) {
1882                                         if (!is_infeasible())
1883                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1884                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1885                                                 add_future_value(curr, act);
1886                                 }
1887                         }
1888                 }
1889         }
1890
1891         /*
1892          * All compatible, thread-exclusive promises must be ordered after any
1893          * concrete stores from the same thread, or else they can be merged with
1894          * this store later
1895          */
1896         for (unsigned int i = 0; i < promises->size(); i++)
1897                 if ((*promises)[i]->is_compatible_exclusive(curr))
1898                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1899
1900         return added;
1901 }
1902
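/*
 * Example (illustrative): item (I) above in a minimal client shape -- two
 * seq_cst stores to the same location are ordered in mo consistently with
 * the seq_cst total order, regardless of which thread issues them:
 *
 *   #include <atomic>
 *
 *   std::atomic<int> y(0);
 *
 *   void t1() { y.store(1, std::memory_order_seq_cst); }
 *   void t2() { y.store(2, std::memory_order_seq_cst); }
 *
 *   // In every execution, whichever store is later in the seq_cst total
 *   // order also comes later in the modification order of y, which is the
 *   // edge added from get_last_seq_cst_write() to curr.
 */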
1903 /** Arbitrary reads from the future are not allowed.  Section 29.3
1904  * part 9 places some constraints.  This method checks one result of that
1905  * constraint.  Others require compiler support. */
1906 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1907 {
1908         if (!writer->is_rmw())
1909                 return true;
1910
1911         if (!reader->is_rmw())
1912                 return true;
1913
1914         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1915                 if (search == reader)
1916                         return false;
1917                 if (search->get_tid() == reader->get_tid() &&
1918                                 search->happens_before(reader))
1919                         break;
1920         }
1921
1922         return true;
1923 }
1924
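/*
 * Example (illustrative): the shape ruled out above is a cycle of RMWs whose
 * read halves would each have to read from the other's write half:
 *
 *   #include <atomic>
 *
 *   std::atomic<int> c(0);
 *
 *   void t1() { c.fetch_add(1, std::memory_order_relaxed); }
 *   void t2() { c.fetch_add(1, std::memory_order_relaxed); }
 *
 *   // Either RMW may read from the initial store or from the other RMW, but
 *   // the two RMWs cannot each read from the other; the walk over
 *   // writer->get_reads_from() above detects that cycle and returns false.
 */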
1925 /**
1926  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1927  * some constraints. This method checks the following constraint (others
1928  * require compiler support):
1929  *
1930  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1931  */
1932 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1933 {
1934         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1935         unsigned int i;
1936         /* Iterate over all threads */
1937         for (i = 0; i < thrd_lists->size(); i++) {
1938                 const ModelAction *write_after_read = NULL;
1939
1940                 /* Iterate over actions in thread, starting from most recent */
1941                 action_list_t *list = &(*thrd_lists)[i];
1942                 action_list_t::reverse_iterator rit;
1943                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1944                         ModelAction *act = *rit;
1945
1946                         /* Don't disallow due to act == reader */
1947                         if (!reader->happens_before(act) || reader == act)
1948                                 break;
1949                         else if (act->is_write())
1950                                 write_after_read = act;
1951                         else if (act->is_read() && act->get_reads_from() != NULL)
1952                                 write_after_read = act->get_reads_from();
1953                 }
1954
1955                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1956                         return false;
1957         }
1958         return true;
1959 }
1960
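/*
 * Example (illustrative): the constraint above is a coherence rule -- a read
 * must not take its value from a store that is mo-after a store the read
 * happens before:
 *
 *   #include <atomic>
 *
 *   std::atomic<int> x(0);
 *
 *   void t1() {
 *           int r = x.load(std::memory_order_relaxed);   // X
 *           x.store(1, std::memory_order_relaxed);       // Y, with X --hb--> Y
 *           // For any store Z with Y --mo--> Z, X must not read from Z;
 *           // i.e. r cannot observe a value that only enters x's
 *           // modification order after Y.
 *           (void)r;
 *   }
 */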
1961 /**
1962  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1963  * The ModelAction under consideration is expected to be taking part in
1964  * release/acquire synchronization as an object of the "reads from" relation.
1965  * Note that this can only provide release sequence support for RMW chains
1966  * which do not read from the future, as those actions cannot be traced until
1967  * their "promise" is fulfilled. Similarly, we may not even establish the
1968  * presence of a release sequence with certainty, as some modification order
1969  * constraints may be decided further in the future. Thus, this function
1970  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1971  * and a boolean representing certainty.
1972  *
1973  * @param rf The action that might be part of a release sequence. Must be a
1974  * write.
1975  * @param release_heads A pass-by-reference style return parameter. After
1976  * execution of this function, release_heads will contain the heads of all the
1977  * relevant release sequences, if any exist with certainty
1978  * @param pending A pass-by-reference style return parameter which is only used
1979  * when returning false (i.e., uncertain). Returns most information regarding
1980  * an uncertain release sequence, including any write operations that might
1981  * break the sequence.
1982  * @return true, if the ModelChecker is certain that release_heads is complete;
1983  * false otherwise
1984  */
1985 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1986                 rel_heads_list_t *release_heads,
1987                 struct release_seq *pending) const
1988 {
1989         /* Only check for release sequences if there are no cycles */
1990         if (mo_graph->checkForCycles())
1991                 return false;
1992
1993         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1994                 ASSERT(rf->is_write());
1995
1996                 if (rf->is_release())
1997                         release_heads->push_back(rf);
1998                 else if (rf->get_last_fence_release())
1999                         release_heads->push_back(rf->get_last_fence_release());
2000                 if (!rf->is_rmw())
2001                         break; /* End of RMW chain */
2002
2003                 /** @todo Need to be smarter here...  In the linux lock
2004                  * example, this will run to the beginning of the program for
2005                  * every acquire. */
2006                 /** @todo The way to be smarter here is to keep going until 1
2007                  * thread has a release preceded by an acquire and you've seen
2008                  *       both. */
2009
2010                 /* acq_rel RMW is a sufficient stopping condition */
2011                 if (rf->is_acquire() && rf->is_release())
2012                         return true; /* complete */
2013         };
2014         if (!rf) {
2015                 /* read from future: need to settle this later */
2016                 pending->rf = NULL;
2017                 return false; /* incomplete */
2018         }
2019
2020         if (rf->is_release())
2021                 return true; /* complete */
2022
2023         /* else relaxed write
2024          * - check for fence-release in the same thread (29.8, stmt. 3)
2025          * - check modification order for contiguous subsequence
2026          *   -> rf must be same thread as release */
2027
2028         const ModelAction *fence_release = rf->get_last_fence_release();
2029         /* Synchronize with a fence-release unconditionally; we don't need to
2030          * find any more "contiguous subsequence..." for it */
2031         if (fence_release)
2032                 release_heads->push_back(fence_release);
2033
2034         int tid = id_to_int(rf->get_tid());
2035         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2036         action_list_t *list = &(*thrd_lists)[tid];
2037         action_list_t::const_reverse_iterator rit;
2038
2039         /* Find rf in the thread list */
2040         rit = std::find(list->rbegin(), list->rend(), rf);
2041         ASSERT(rit != list->rend());
2042
2043         /* Find the last {write,fence}-release */
2044         for (; rit != list->rend(); rit++) {
2045                 if (fence_release && *(*rit) < *fence_release)
2046                         break;
2047                 if ((*rit)->is_release())
2048                         break;
2049         }
2050         if (rit == list->rend()) {
2051                 /* No write-release in this thread */
2052                 return true; /* complete */
2053         } else if (fence_release && *(*rit) < *fence_release) {
2054                 /* The fence-release is more recent (and so, "stronger") than
2055                  * the most recent write-release */
2056                 return true; /* complete */
2057         } /* else, need to establish contiguous release sequence */
2058         ModelAction *release = *rit;
2059
2060         ASSERT(rf->same_thread(release));
2061
2062         pending->writes.clear();
2063
2064         bool certain = true;
2065         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2066                 if (id_to_int(rf->get_tid()) == (int)i)
2067                         continue;
2068                 list = &(*thrd_lists)[i];
2069
2070                 /* Can we ensure no future writes from this thread may break
2071                  * the release seq? */
2072                 bool future_ordered = false;
2073
2074                 ModelAction *last = get_last_action(int_to_id(i));
2075                 Thread *th = get_thread(int_to_id(i));
2076                 if ((last && rf->happens_before(last)) ||
2077                                 !is_enabled(th) ||
2078                                 th->is_complete())
2079                         future_ordered = true;
2080
2081                 ASSERT(!th->is_model_thread() || future_ordered);
2082
2083                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2084                         const ModelAction *act = *rit;
2085                         /* Reach synchronization -> this thread is complete */
2086                         if (act->happens_before(release))
2087                                 break;
2088                         if (rf->happens_before(act)) {
2089                                 future_ordered = true;
2090                                 continue;
2091                         }
2092
2093                         /* Only non-RMW writes can break release sequences */
2094                         if (!act->is_write() || act->is_rmw())
2095                                 continue;
2096
2097                         /* Check modification order */
2098                         if (mo_graph->checkReachable(rf, act)) {
2099                                 /* rf --mo--> act */
2100                                 future_ordered = true;
2101                                 continue;
2102                         }
2103                         if (mo_graph->checkReachable(act, release))
2104                                 /* act --mo--> release */
2105                                 break;
2106                         if (mo_graph->checkReachable(release, act) &&
2107                                       mo_graph->checkReachable(act, rf)) {
2108                                 /* release --mo-> act --mo--> rf */
2109                                 return true; /* complete */
2110                         }
2111                         /* act may break release sequence */
2112                         pending->writes.push_back(act);
2113                         certain = false;
2114                 }
2115                 if (!future_ordered)
2116                         certain = false; /* This thread is uncertain */
2117         }
2118
2119         if (certain) {
2120                 release_heads->push_back(release);
2121                 pending->writes.clear();
2122         } else {
2123                 pending->release = release;
2124                 pending->rf = rf;
2125         }
2126         return certain;
2127 }
2128
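/*
 * Example (illustrative): a release sequence headed by a release store may be
 * extended by RMWs from other threads; an acquire load that reads from the
 * tail still synchronizes with the head found above:
 *
 *   #include <atomic>
 *
 *   std::atomic<int> s(0);
 *   int payload = 0;
 *
 *   void t1() {
 *           payload = 1;
 *           s.store(1, std::memory_order_release);       // release head
 *   }
 *
 *   void t2() {
 *           s.fetch_add(1, std::memory_order_relaxed);   // continues the sequence
 *   }
 *
 *   void t3() {
 *           if (s.load(std::memory_order_acquire) == 2)  // reads from t2's RMW
 *                   (void)payload;                       // synchronizes with t1's store,
 *                                                        // so payload == 1 is visible
 *   }
 */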
2129 /**
2130  * An interface for getting the release sequence head(s) with which a
2131  * given ModelAction must synchronize. This function only returns a non-empty
2132  * result when it can locate a release sequence head with certainty. Otherwise,
2133  * it may mark the internal state of the ModelChecker so that it will handle
2134  * the release sequence at a later time, causing @a acquire to update its
2135  * synchronization at some later point in execution.
2136  *
2137  * @param acquire The 'acquire' action that may synchronize with a release
2138  * sequence
2139  * @param read The read action that may read from a release sequence; this may
2140  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2141  * when 'acquire' is a fence-acquire)
2142  * @param release_heads A pass-by-reference return parameter. Will be filled
2143  * with the head(s) of the release sequence(s), if they exist with certainty.
2144  * @see ModelChecker::release_seq_heads
2145  */
2146 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2147                 ModelAction *read, rel_heads_list_t *release_heads)
2148 {
2149         const ModelAction *rf = read->get_reads_from();
2150         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2151         sequence->acquire = acquire;
2152         sequence->read = read;
2153
2154         if (!release_seq_heads(rf, release_heads, sequence)) {
2155                 /* add act to 'lazy checking' list */
2156                 pending_rel_seqs->push_back(sequence);
2157         } else {
2158                 snapshot_free(sequence);
2159         }
2160 }
2161
2162 /**
2163  * Attempt to resolve all stashed operations that might synchronize with a
2164  * release sequence for a given location. This implements the "lazy" portion of
2165  * determining whether or not a release sequence was contiguous, since not all
2166  * modification order information is present at the time an action occurs.
2167  *
2168  * @param location The location/object that should be checked for release
2169  * sequence resolutions. A NULL value means to check all locations.
2170  * @param work_queue The work queue to which to add work items as they are
2171  * generated
2172  * @return True if any updates occurred (new synchronization, new mo_graph
2173  * edges)
2174  */
2175 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2176 {
2177         bool updated = false;
2178         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2179         while (it != pending_rel_seqs->end()) {
2180                 struct release_seq *pending = *it;
2181                 ModelAction *acquire = pending->acquire;
2182                 const ModelAction *read = pending->read;
2183
2184                 /* Only resolve sequences on the given location, if provided */
2185                 if (location && read->get_location() != location) {
2186                         it++;
2187                         continue;
2188                 }
2189
2190                 const ModelAction *rf = read->get_reads_from();
2191                 rel_heads_list_t release_heads;
2192                 bool complete;
2193                 complete = release_seq_heads(rf, &release_heads, pending);
2194                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2195                         if (!acquire->has_synchronized_with(release_heads[i])) {
2196                                 if (acquire->synchronize_with(release_heads[i]))
2197                                         updated = true;
2198                                 else
2199                                         set_bad_synchronization();
2200                         }
2201                 }
2202
2203                 if (updated) {
2204                         /* Re-check all pending release sequences */
2205                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2206                         /* Re-check read-acquire for mo_graph edges */
2207                         if (acquire->is_read())
2208                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2209
2210                         /* propagate synchronization to later actions */
2211                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2212                         for (; (*rit) != acquire; rit++) {
2213                                 ModelAction *propagate = *rit;
2214                                 if (acquire->happens_before(propagate)) {
2215                                         propagate->synchronize_with(acquire);
2216                                         /* Re-check 'propagate' for mo_graph edges */
2217                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2218                                 }
2219                         }
2220                 }
2221                 if (complete) {
2222                         it = pending_rel_seqs->erase(it);
2223                         snapshot_free(pending);
2224                 } else {
2225                         it++;
2226                 }
2227         }
2228
2229         // If we resolved promises or release sequences, check whether we have realized a data race.
2230         checkDataRaces();
2231
2232         return updated;
2233 }
2234
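/*
 * Sketch (standalone, stand-in types): the loop above uses the usual
 * "erase or advance" idiom for pruning a vector while iterating -- erase()
 * returns the next valid iterator, so the iterator is only incremented on
 * the keep path:
 *
 *   #include <vector>
 *
 *   static void prune_completed(std::vector<int> *pending)
 *   {
 *           std::vector<int>::iterator it = pending->begin();
 *           while (it != pending->end()) {
 *                   bool complete = (*it % 2 == 0);   // stand-in for release_seq_heads()
 *                   if (complete)
 *                           it = pending->erase(it);  // erase() hands back the next iterator
 *                   else
 *                           it++;
 *           }
 *   }
 */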
2235 /**
2236  * Performs various bookkeeping operations for the current ModelAction. For
2237  * instance, adds action to the per-object, per-thread action vector and to the
2238  * action trace list of all thread actions.
2239  *
2240  * @param act is the ModelAction to add.
2241  */
2242 void ModelChecker::add_action_to_lists(ModelAction *act)
2243 {
2244         int tid = id_to_int(act->get_tid());
2245         ModelAction *uninit = NULL;
2246         int uninit_id = -1;
2247         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2248         if (list->empty() && act->is_atomic_var()) {
2249                 uninit = new_uninitialized_action(act->get_location());
2250                 uninit_id = id_to_int(uninit->get_tid());
2251                 list->push_back(uninit);
2252         }
2253         list->push_back(act);
2254
2255         action_trace->push_back(act);
2256         if (uninit)
2257                 action_trace->push_front(uninit);
2258
2259         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2260         if (tid >= (int)vec->size())
2261                 vec->resize(priv->next_thread_id);
2262         (*vec)[tid].push_back(act);
2263         if (uninit)
2264                 (*vec)[uninit_id].push_front(uninit);
2265
2266         if ((int)thrd_last_action->size() <= tid)
2267                 thrd_last_action->resize(get_num_threads());
2268         (*thrd_last_action)[tid] = act;
2269         if (uninit)
2270                 (*thrd_last_action)[uninit_id] = uninit;
2271
2272         if (act->is_fence() && act->is_release()) {
2273                 if ((int)thrd_last_fence_release->size() <= tid)
2274                         thrd_last_fence_release->resize(get_num_threads());
2275                 (*thrd_last_fence_release)[tid] = act;
2276         }
2277
2278         if (act->is_wait()) {
2279                 void *mutex_loc = (void *) act->get_value();
2280                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2281
2282                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2283                 if (tid >= (int)vec->size())
2284                         vec->resize(priv->next_thread_id);
2285                 (*vec)[tid].push_back(act);
2286         }
2287 }
2288
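/*
 * Sketch (standalone, simplified): the bookkeeping above keeps, for each
 * memory location, one action list per thread, resizing the per-thread
 * vector on demand before appending:
 *
 *   #include <map>
 *   #include <vector>
 *   #include <list>
 *
 *   typedef std::list<int> actions;                        // stand-in for action_list_t
 *   static std::map<const void *, std::vector<actions> > by_loc;
 *
 *   static void record(const void *loc, int tid, int action_id, int num_threads)
 *   {
 *           std::vector<actions> &vec = by_loc[loc];
 *           if (tid >= (int)vec.size())
 *                   vec.resize(num_threads);               // grow to cover all threads
 *           vec[tid].push_back(action_id);
 *   }
 */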
2289 /**
2290  * @brief Get the last action performed by a particular Thread
2291  * @param tid The thread ID of the Thread in question
2292  * @return The last action in the thread
2293  */
2294 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2295 {
2296         int threadid = id_to_int(tid);
2297         if (threadid < (int)thrd_last_action->size())
2298                 return (*thrd_last_action)[id_to_int(tid)];
2299         else
2300                 return NULL;
2301 }
2302
2303 /**
2304  * @brief Get the last fence release performed by a particular Thread
2305  * @param tid The thread ID of the Thread in question
2306  * @return The last fence release in the thread, if one exists; NULL otherwise
2307  */
2308 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2309 {
2310         int threadid = id_to_int(tid);
2311         if (threadid < (int)thrd_last_fence_release->size())
2312                 return (*thrd_last_fence_release)[id_to_int(tid)];
2313         else
2314                 return NULL;
2315 }
2316
2317 /**
2318  * Gets the last memory_order_seq_cst write (in the total global sequence)
2319  * performed on a particular object (i.e., memory location), not including the
2320  * current action.
2321  * @param curr The current ModelAction; also denotes the object location to
2322  * check
2323  * @return The last seq_cst write
2324  */
2325 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2326 {
2327         void *location = curr->get_location();
2328         action_list_t *list = get_safe_ptr_action(obj_map, location);
2329         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2330         action_list_t::reverse_iterator rit;
2331         for (rit = list->rbegin(); rit != list->rend(); rit++)
2332                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2333                         return *rit;
2334         return NULL;
2335 }
2336
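/*
 * Sketch (standalone): this lookup and the related ones below are all "most
 * recent element satisfying a predicate" searches over a per-object list,
 * i.e. a reverse scan that returns the first match:
 *
 *   #include <list>
 *   #include <cstddef>
 *
 *   static const int *last_matching(const std::list<int> &history)
 *   {
 *           std::list<int>::const_reverse_iterator rit;
 *           for (rit = history.rbegin(); rit != history.rend(); rit++)
 *                   if (*rit % 2 == 0)   // stand-in for is_write() && is_seqcst()
 *                           return &*rit;
 *           return NULL;
 *   }
 */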
2337 /**
2338  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2339  * performed in a particular thread, prior to a particular fence.
2340  * @param tid The ID of the thread to check
2341  * @param before_fence The fence from which to begin the search; if NULL, then
2342  * search for the most recent fence in the thread.
2343  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2344  */
2345 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2346 {
2347         /* All fences should have NULL location */
2348         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2349         action_list_t::reverse_iterator rit = list->rbegin();
2350
2351         if (before_fence) {
2352                 for (; rit != list->rend(); rit++)
2353                         if (*rit == before_fence)
2354                                 break;
2355
2356                 ASSERT(*rit == before_fence);
2357                 rit++;
2358         }
2359
2360         for (; rit != list->rend(); rit++)
2361                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2362                         return *rit;
2363         return NULL;
2364 }
2365
2366 /**
2367  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2368  * location). This function identifies the mutex according to the current
2369  * action, which is presumed to perform on the same mutex.
2370  * @param curr The current ModelAction; also denotes the object location to
2371  * check
2372  * @return The last unlock operation
2373  */
2374 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2375 {
2376         void *location = curr->get_location();
2377         action_list_t *list = get_safe_ptr_action(obj_map, location);
2378         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2379         action_list_t::reverse_iterator rit;
2380         for (rit = list->rbegin(); rit != list->rend(); rit++)
2381                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2382                         return *rit;
2383         return NULL;
2384 }
2385
2386 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2387 {
2388         ModelAction *parent = get_last_action(tid);
2389         if (!parent)
2390                 parent = get_thread(tid)->get_creation();
2391         return parent;
2392 }
2393
2394 /**
2395  * Returns the clock vector for a given thread.
2396  * @param tid The thread whose clock vector we want
2397  * @return Desired clock vector
2398  */
2399 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2400 {
2401         return get_parent_action(tid)->get_cv();
2402 }
2403
2404 /**
2405  * Resolve a set of Promises with a current write. The set is provided in the
2406  * Node corresponding to @a write.
2407  * @param write The ModelAction that is fulfilling Promises
2408  * @return True if promises were resolved; false otherwise
2409  */
2410 bool ModelChecker::resolve_promises(ModelAction *write)
2411 {
2412         bool haveResolved = false;
2413         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2414         promise_list_t mustResolve, resolved;
2415
2416         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2417                 Promise *promise = (*promises)[promise_index];
2418                 if (write->get_node()->get_promise(i)) {
2419                         ModelAction *read = promise->get_action();
2420                         read_from(read, write);
2421                         //Make sure the promise's value matches the write's value
2422                         ASSERT(promise->is_compatible(write));
2423                         mo_graph->resolvePromise(read, write, &mustResolve);
2424
2425                         resolved.push_back(promise);
2426                         promises->erase(promises->begin() + promise_index);
2427                         actions_to_check.push_back(read);
2428
2429                         haveResolved = true;
2430                 } else
2431                         promise_index++;
2432         }
2433
2434         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2435                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2436                                 == resolved.end())
2437                         priv->failed_promise = true;
2438         }
2439         for (unsigned int i = 0; i < resolved.size(); i++)
2440                 delete resolved[i];
2441         //Check whether reading these writes has made threads unable to
2442         //resolve promises
2443
2444         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2445                 ModelAction *read = actions_to_check[i];
2446                 mo_check_promises(read, true);
2447         }
2448
2449         return haveResolved;
2450 }
2451
2452 /**
2453  * Compute the set of promises that could potentially be satisfied by this
2454  * action. Note that the set computation actually appears in the Node, not in
2455  * ModelChecker.
2456  * @param curr The ModelAction that may satisfy promises
2457  */
2458 void ModelChecker::compute_promises(ModelAction *curr)
2459 {
2460         for (unsigned int i = 0; i < promises->size(); i++) {
2461                 Promise *promise = (*promises)[i];
2462                 const ModelAction *act = promise->get_action();
2463                 if (!act->happens_before(curr) &&
2464                                 act->is_read() &&
2465                                 !act->could_synchronize_with(curr) &&
2466                                 !act->same_thread(curr) &&
2467                                 act->get_location() == curr->get_location() &&
2468                                 promise->get_value() == curr->get_value()) {
2469                         curr->get_node()->set_promise(i, act->is_rmw());
2470                 }
2471         }
2472 }
2473
2474 /** Checks promises in response to a change in a thread's ClockVector. */
2475 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2476 {
2477         for (unsigned int i = 0; i < promises->size(); i++) {
2478                 Promise *promise = (*promises)[i];
2479                 const ModelAction *act = promise->get_action();
2480                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2481                                 merge_cv->synchronized_since(act)) {
2482                         if (promise->eliminate_thread(tid)) {
2483                                 //Promise has failed
2484                                 priv->failed_promise = true;
2485                                 return;
2486                         }
2487                 }
2488         }
2489 }
2490
2491 void ModelChecker::check_promises_thread_disabled()
2492 {
2493         for (unsigned int i = 0; i < promises->size(); i++) {
2494                 Promise *promise = (*promises)[i];
2495                 if (promise->has_failed()) {
2496                         priv->failed_promise = true;
2497                         return;
2498                 }
2499         }
2500 }
2501
2502 /**
2503  * @brief Checks promises in response to addition to modification order for
2504  * threads.
2505  *
2506  * We test whether threads are still available for satisfying promises after an
2507  * addition to our modification order constraints. Those that are unavailable
2508  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2509  * that promise has failed.
2510  *
2511  * @param act The ModelAction which updated the modification order
2512  * @param is_read_check Should be true if act is a read and we must check for
2513  * updates to the store from which it read (there is a distinction here for
2514  * RMW's, which are both a load and a store)
2515  */
2516 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2517 {
2518         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2519
2520         for (unsigned int i = 0; i < promises->size(); i++) {
2521                 Promise *promise = (*promises)[i];
2522                 const ModelAction *pread = promise->get_action();
2523
2524                 // Is this promise on the same location?
2525                 if (!pread->same_var(write))
2526                         continue;
2527
2528                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2529                         priv->failed_promise = true;
2530                         return;
2531                 }
2532
2533                 // Don't do any lookups twice for the same thread
2534                 if (!promise->thread_is_available(act->get_tid()))
2535                         continue;
2536
2537                 if (mo_graph->checkReachable(promise, write)) {
2538                         if (mo_graph->checkPromise(write, promise)) {
2539                                 priv->failed_promise = true;
2540                                 return;
2541                         }
2542                 }
2543         }
2544 }
2545
2546 /**
2547  * Compute the set of writes that may break the current pending release
2548  * sequence. This information is extracted from previous release sequence
2549  * calculations.
2550  *
2551  * @param curr The current ModelAction. Must be a release sequence fixup
2552  * action.
2553  */
2554 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2555 {
2556         if (pending_rel_seqs->empty())
2557                 return;
2558
2559         struct release_seq *pending = pending_rel_seqs->back();
2560         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2561                 const ModelAction *write = pending->writes[i];
2562                 curr->get_node()->add_relseq_break(write);
2563         }
2564
2565         /* NULL means don't break the sequence; just synchronize */
2566         curr->get_node()->add_relseq_break(NULL);
2567 }
2568
2569 /**
2570  * Build up an initial set of all past writes that this 'read' action may read
2571  * from. This set is determined by the clock vector's "happens before"
2572  * relationship.
2573  * @param curr is the current ModelAction that we are exploring; it must be a
2574  * 'read' operation.
2575  */
2576 void ModelChecker::build_reads_from_past(ModelAction *curr)
2577 {
2578         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2579         unsigned int i;
2580         ASSERT(curr->is_read());
2581
2582         ModelAction *last_sc_write = NULL;
2583
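	/* For seq_cst reads, record the most recent seq_cst write to the same location */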
2584         if (curr->is_seqcst())
2585                 last_sc_write = get_last_seq_cst_write(curr);
2586
2587         /* Iterate over all threads */
2588         for (i = 0; i < thrd_lists->size(); i++) {
2589                 /* Iterate over actions in thread, starting from most recent */
2590                 action_list_t *list = &(*thrd_lists)[i];
2591                 action_list_t::reverse_iterator rit;
2592                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2593                         ModelAction *act = *rit;
2594
2595                         /* Only consider 'write' actions */
2596                         if (!act->is_write() || act == curr)
2597                                 continue;
2598
2599                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2600                         bool allow_read = true;
2601
2602                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2603                                 allow_read = false;
2604                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2605                                 allow_read = false;
2606
2607                         if (allow_read) {
2608                                 /* Only add feasible reads */
2609                                 mo_graph->startChanges();
2610                                 r_modification_order(curr, act);
2611                                 if (!is_infeasible())
2612                                         curr->get_node()->add_read_from(act);
2613                                 mo_graph->rollbackChanges();
2614                         }
2615
2616                         /* Include at most one act per thread that "happens before" curr */
2617                         if (act->happens_before(curr))
2618                                 break;
2619                 }
2620         }
2621         /* The may-read-from set can be empty only if the execution is already doomed */
2622         if (!curr->get_node()->get_read_from_size() && curr->get_node()->future_value_empty()) {
2623                 priv->no_valid_reads = true;
2624                 set_assert();
2625         }
2626
2627         if (DBG_ENABLED()) {
2628                 model_print("Reached read action:\n");
2629                 curr->print();
2630                 model_print("Printing may_read_from\n");
2631                 curr->get_node()->print_may_read_from();
2632                 model_print("End printing may_read_from\n");
2633         }
2634 }
2635
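/**
 * Determine whether a read by a thread on the sleep set may read from a given
 * write. Walks backward through the chain of RMWs that produced the write,
 * looking for a release store performed while the reading thread was on the
 * sleep set.
 */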
2636 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2637 {
2638         for ( ; write != NULL; write = write->get_reads_from()) {
2639                 /* UNINIT actions don't have a Node, and they never sleep */
2640                 if (write->is_uninitialized())
2641                         return true;
2642                 Node *prevnode = write->get_node()->get_parent();
2643
2644                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2645                 if (write->is_release() && thread_sleep)
2646                         return true;
2647                 if (!write->is_rmw())
2648                         return false;
2649         }
2650         return true;
2651 }
2652
2653 /**
2654  * @brief Create a new action representing an uninitialized atomic
2655  * @param location The memory location of the atomic object
2656  * @return A pointer to a new ModelAction
2657  */
2658 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2659 {
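	/* Allocate from the snapshotting heap and construct the action in place
	 * with placement new */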
2660         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2661         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2662         act->create_cv(NULL);
2663         return act;
2664 }
2665
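/** @brief Print a list of actions, followed by a simple hash of the trace */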
2666 static void print_list(action_list_t *list)
2667 {
2668         action_list_t::iterator it;
2669
2670         model_print("---------------------------------------------------------------------\n");
2671
2672         unsigned int hash = 0;
2673
2674         for (it = list->begin(); it != list->end(); it++) {
2675                 (*it)->print();
2676                 hash = hash^(hash<<3)^((*it)->hash());
2677         }
2678         model_print("HASH %u\n", hash);
2679         model_print("---------------------------------------------------------------------\n");
2680 }
2681
2682 #if SUPPORT_MOD_ORDER_DUMP
2683 void ModelChecker::dumpGraph(char *filename) const
2684 {
2685         char buffer[200];
2686         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2687         FILE *file = fopen(buffer, "w");
2688         fprintf(file, "digraph %s {\n", filename);
2689         mo_graph->dumpNodes(file);
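	/* Track the most recent action in each thread so that consecutive actions
	 * can be linked with sequenced-before (sb) edges */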
2690         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2691
2692         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2693                 ModelAction *action = *it;
2694                 if (action->is_read()) {
2695                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2696                         if (action->get_reads_from() != NULL)
2697                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2698                 }
2699                 if (thread_array[action->get_tid()] != NULL) {
2700                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2701                 }
2702
2703                 thread_array[action->get_tid()] = action;
2704         }
2705         fprintf(file, "}\n");
2706         model_free(thread_array);
2707         fclose(file);
2708 }
2709 #endif
2710
2711 /** @brief Prints an execution trace summary. */
2712 void ModelChecker::print_summary() const
2713 {
2714 #if SUPPORT_MOD_ORDER_DUMP
2715         char buffername[100];
2716         snprintf(buffername, sizeof(buffername), "exec%04u", stats.num_total);
2717         mo_graph->dumpGraphToFile(buffername);
2718         snprintf(buffername, sizeof(buffername), "graph%04u", stats.num_total);
2719         dumpGraph(buffername);
2720 #endif
2721
2722         model_print("Execution %d:", stats.num_total);
2723         if (isfeasibleprefix())
2724                 model_print("\n");
2725         else
2726                 print_infeasibility(" INFEASIBLE");
2727         print_list(action_trace);
2728         model_print("\n");
2729 }
2730
2731 /**
2732  * Add a Thread to the system for the first time. Should only be called once
2733  * per thread.
2734  * @param t The Thread to add
2735  */
2736 void ModelChecker::add_thread(Thread *t)
2737 {
2738         thread_map->put(id_to_int(t->get_id()), t);
2739         scheduler->add_thread(t);
2740 }
2741
2742 /**
2743  * Removes a thread from the scheduler.
2744  * @param t The Thread to remove
2745  */
2746 void ModelChecker::remove_thread(Thread *t)
2747 {
2748         scheduler->remove_thread(t);
2749 }
2750
2751 /**
2752  * @brief Get a Thread reference by its ID
2753  * @param tid The Thread's ID
2754  * @return A Thread reference
2755  */
2756 Thread * ModelChecker::get_thread(thread_id_t tid) const
2757 {
2758         return thread_map->get(id_to_int(tid));
2759 }
2760
2761 /**
2762  * @brief Get a reference to the Thread in which a ModelAction was executed
2763  * @param act The ModelAction
2764  * @return A Thread reference
2765  */
2766 Thread * ModelChecker::get_thread(const ModelAction *act) const
2767 {
2768         return get_thread(act->get_tid());
2769 }
2770
2771 /**
2772  * @brief Check if a Thread is currently enabled
2773  * @param t The Thread to check
2774  * @return True if the Thread is currently enabled
2775  */
2776 bool ModelChecker::is_enabled(Thread *t) const
2777 {
2778         return scheduler->is_enabled(t);
2779 }
2780
2781 /**
2782  * @brief Check if a Thread is currently enabled
2783  * @param tid The ID of the Thread to check
2784  * @return True if the Thread is currently enabled
2785  */
2786 bool ModelChecker::is_enabled(thread_id_t tid) const
2787 {
2788         return scheduler->is_enabled(tid);
2789 }
2790
2791 /**
2792  * Switch from a model-checker context to a user-thread context. This is the
2793  * complement of ModelChecker::switch_to_master and must be called from the
2794  * model-checker context
2795  *
2796  * @param thread The user-thread to switch to
2797  */
2798 void ModelChecker::switch_from_master(Thread *thread)
2799 {
2800         scheduler->set_current_thread(thread);
2801         Thread::swap(&system_context, thread);
2802 }
2803
2804 /**
2805  * Switch from a user-context to the "master thread" context (a.k.a. system
2806  * context). This switch is made with the intention of exploring a particular
2807  * model-checking action (described by a ModelAction object). Must be called
2808  * from a user-thread context.
2809  *
2810  * @param act The current action that will be explored. May be NULL only if
2811  * trace is exiting via an assertion (see ModelChecker::set_assert and
2812  * ModelChecker::has_asserted).
2813  * @return Return the value returned by the current action
2814  */
2815 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2816 {
2817         DBG();
2818         Thread *old = thread_current();
2819         ASSERT(!old->get_pending());
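	/* Stash this thread's next action for the model-checker context to consume */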
2820         old->set_pending(act);
2821         if (Thread::swap(old, &system_context) < 0) {
2822                 perror("swap threads");
2823                 exit(EXIT_FAILURE);
2824         }
2825         return old->get_return_value();
2826 }
2827
2828 /**
2829  * Takes the next step in the execution, if possible.
2830  * @param curr The current step to take
2831  * @return The next Thread to run, if any; NULL if this execution should
2832  * terminate
2833  */
2834 Thread * ModelChecker::take_step(ModelAction *curr)
2835 {
2836         Thread *curr_thrd = get_thread(curr);
2837         ASSERT(curr_thrd->get_state() == THREAD_READY);
2838
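	/* Process the current action; the model checker may return a different
	 * action to continue with */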
2839         curr = check_current_action(curr);
2840
2841         /* Infeasible -> don't take any more steps */
2842         if (is_infeasible())
2843                 return NULL;
2844         else if (isfeasibleprefix() && have_bug_reports()) {
2845                 set_assert();
2846                 return NULL;
2847         }
2848
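	/* Stop this execution if it exceeds the configured bound on its length */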
2849         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2850                 return NULL;
2851
2852         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2853                 scheduler->remove_thread(curr_thrd);
2854
2855         Thread *next_thrd = get_next_thread(curr);
2856
2857         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2858                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2859
2860         return next_thrd;
2861 }
2862
2863 /** Wrapper to run the user's main function, with appropriate arguments */
2864 void user_main_wrapper(void *)
2865 {
2866         user_main(model->params.argc, model->params.argv);
2867 }
2868
2869 /** @brief Run ModelChecker for the user program */
2870 void ModelChecker::run()
2871 {
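	/* Each iteration of the outer loop explores one execution of the user program */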
2872         do {
2873                 thrd_t user_thread;
2874                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2875                 add_thread(t);
2876
2877                 do {
2878                         /*
2879                          * Stash next pending action(s) for thread(s). There
2880                          * should only need to stash one thread's action--the
2881                          * thread which just took a step--plus the first step
2882                          * for any newly-created thread
2883                          */
2884                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2885                                 thread_id_t tid = int_to_id(i);
2886                                 Thread *thr = get_thread(tid);
2887                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2888                                         switch_from_master(thr);
2889                                 }
2890                         }
2891
2892                         /* Catch assertions from prior take_step or from
2893                          * between-ModelAction bugs (e.g., data races) */
2894                         if (has_asserted())
2895                                 break;
2896
2897                         /* Consume the next action for a Thread */
2898                         ModelAction *curr = t->get_pending();
2899                         t->set_pending(NULL);
2900                         t = take_step(curr);
2901                 } while (t && !t->is_model_thread());
2902
2903                 /*
2904                  * Launch end-of-execution release sequence fixups only when
2905                  * the execution is otherwise feasible AND there are:
2906                  *
2907                  * (1) pending release sequences
2908                  * (2) pending assertions that could be invalidated by a change
2909                  * in clock vectors (i.e., data races)
2910                  * (3) no pending promises
2911                  */
2912                 while (!pending_rel_seqs->empty() &&
2913                                 is_feasible_prefix_ignore_relseq() &&
2914                                 !unrealizedraces.empty()) {
2915                         model_print("*** WARNING: release sequence fixup action "
2916                                         "(%zu pending release sequence(s)) ***\n",
2917                                         pending_rel_seqs->size());
2918                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2919                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2920                                         model_thread);
2921                         take_step(fixup);
2922                 }
2923         } while (next_execution());
2924
2925         model_print("******* Model-checking complete: *******\n");
2926         print_stats();
2927 }