1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "snapshot-interface.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "output.h"
19 #include "traceanalysis.h"
20 #include "bugmessage.h"
21
22 #define INITIAL_THREAD_ID       0
23
24 ModelChecker *model;
25
26 /**
27  * Structure for holding small ModelChecker members that should be snapshotted
28  */
29 struct model_snapshot_members {
30         model_snapshot_members() :
31                 /* First thread created will have id INITIAL_THREAD_ID */
32                 next_thread_id(INITIAL_THREAD_ID),
33                 used_sequence_numbers(0),
34                 next_backtrack(NULL),
35                 bugs(),
36                 stats(),
37                 failed_promise(false),
38                 too_many_reads(false),
39                 no_valid_reads(false),
40                 bad_synchronization(false),
41                 asserted(false)
42         { }
43
44         ~model_snapshot_members() {
45                 for (unsigned int i = 0; i < bugs.size(); i++)
46                         delete bugs[i];
47                 bugs.clear();
48         }
49
50         unsigned int next_thread_id;
51         modelclock_t used_sequence_numbers;
52         ModelAction *next_backtrack;
53         SnapVector<bug_message *> bugs;
54         struct execution_stats stats;
55         bool failed_promise;
56         bool too_many_reads;
57         bool no_valid_reads;
58         /** @brief Incorrectly-ordered synchronization was made */
59         bool bad_synchronization;
60         bool asserted;
61
62         SNAPSHOTALLOC
63 };
64
65 /** @brief Constructor */
66 ModelChecker::ModelChecker(struct model_params params) :
67         /* Initialize default scheduler */
68         params(params),
69         scheduler(new Scheduler()),
70         diverge(NULL),
71         earliest_diverge(NULL),
72         action_trace(new action_list_t()),
73         thread_map(new HashTable<int, Thread *, int>()),
74         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
75         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
76         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
77         promises(new SnapVector<Promise *>()),
78         futurevalues(new SnapVector<struct PendingFutureValue>()),
79         pending_rel_seqs(new SnapVector<struct release_seq *>()),
80         thrd_last_action(new SnapVector<ModelAction *>(1)),
81         thrd_last_fence_release(new SnapVector<ModelAction *>()),
82         node_stack(new NodeStack()),
83         trace_analyses(new ModelVector<TraceAnalysis *>()),
84         priv(new struct model_snapshot_members()),
85         mo_graph(new CycleGraph())
86 {
87         /* Initialize a model-checker thread, for special ModelActions */
88         model_thread = new Thread(get_next_id());
89         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
90 }
91
92 /** @brief Destructor */
93 ModelChecker::~ModelChecker()
94 {
95         for (unsigned int i = 0; i < get_num_threads(); i++)
96                 delete thread_map->get(i);
97         delete thread_map;
98
99         delete obj_thrd_map;
100         delete obj_map;
101         delete condvar_waiters_map;
102         delete action_trace;
103
104         for (unsigned int i = 0; i < promises->size(); i++)
105                 delete (*promises)[i];
106         delete promises;
107
108         delete pending_rel_seqs;
109
110         delete thrd_last_action;
111         delete thrd_last_fence_release;
112         delete node_stack;
113         for (unsigned int i = 0; i < trace_analyses->size(); i++)
114                 delete (*trace_analyses)[i];
115         delete trace_analyses;
116         delete scheduler;
117         delete mo_graph;
118         delete priv;
119 }
120
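/**
 * @brief Get the action list for a memory location, allocating an empty list
 * on first use so that the returned pointer is never NULL
 */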
121 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
122 {
123         action_list_t *tmp = hash->get(ptr);
124         if (tmp == NULL) {
125                 tmp = new action_list_t();
126                 hash->put(ptr, tmp);
127         }
128         return tmp;
129 }
130
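/**
 * @brief Get the per-thread action-list vector for a memory location,
 * allocating an empty vector on first use so that the returned pointer is
 * never NULL
 */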
131 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
132 {
133         SnapVector<action_list_t> *tmp = hash->get(ptr);
134         if (tmp == NULL) {
135                 tmp = new SnapVector<action_list_t>();
136                 hash->put(ptr, tmp);
137         }
138         return tmp;
139 }
140
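/**
 * @brief Get the list of actions a given thread has performed on a given object
 * @param obj The memory location of interest
 * @param tid The thread of interest
 * @return The thread's action list for obj, or NULL if none has been recorded
 */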
141 action_list_t * ModelChecker::get_actions_on_obj(void * obj, thread_id_t tid) {
142         SnapVector<action_list_t> *wrv = obj_thrd_map->get(obj);
143         if (wrv == NULL)
144                 return NULL;
145         unsigned int thread = id_to_int(tid);
146         if (thread < wrv->size())
147                 return &(*wrv)[thread];
148         else
149                 return NULL;
150 }
151
152
153 /**
154  * Restores user program to initial state and resets all model-checker data
155  * structures.
156  */
157 void ModelChecker::reset_to_initial_state()
158 {
159         DEBUG("+++ Resetting to initial state +++\n");
160         node_stack->reset_execution();
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 /**
203  * @brief Select the next thread to execute based on the current action
204  *
205  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
206  * actions should be followed by the execution of their child thread. In either
207  * case, the current action should determine the next thread schedule.
208  *
209  * @param curr The current action
210  * @return The next thread to run, if the current action will determine this
211  * selection; otherwise NULL
212  */
213 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
214 {
215         /* Do not split atomic RMW */
216         if (curr->is_rmwr())
217                 return get_thread(curr);
218         /* Follow CREATE with the created thread */
219         if (curr->get_type() == THREAD_CREATE)
220                 return curr->get_thread_operand();
221         return NULL;
222 }
223
224 /**
225  * @brief Choose the next thread to execute.
226  *
227  * This function chooses the next thread that should execute. It can enforce
228  * execution replay/backtracking or, if the model-checker has no preference
229  * regarding the next thread (i.e., when exploring a new execution ordering),
230  * defer to the scheduler.
231  *
232  * @return The next thread chosen to run, if any exists; NULL if the current
233  * execution should terminate.
234  */
235 Thread * ModelChecker::get_next_thread()
236 {
237         thread_id_t tid;
238
239         /*
240          * Have we completed exploring the preselected path? Then let the
241          * scheduler decide
242          */
243         if (diverge == NULL)
244                 return scheduler->select_next_thread(node_stack->get_head());
245
246         /* Else, we are trying to replay an execution */
247         ModelAction *next = node_stack->get_next()->get_action();
248
249         if (next == diverge) {
250                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
251                         earliest_diverge = diverge;
252
253                 Node *nextnode = next->get_node();
254                 Node *prevnode = nextnode->get_parent();
255                 scheduler->update_sleep_set(prevnode);
256
257                 /* Reached divergence point */
258                 if (nextnode->increment_behaviors()) {
259                         /* Execute the same thread with a new behavior */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else {
263                         ASSERT(prevnode);
264                         /* Make a different thread execute for next step */
265                         scheduler->add_sleep(get_thread(next->get_tid()));
266                         tid = prevnode->get_next_backtrack();
267                         /* Make sure the backtracked thread isn't sleeping. */
268                         node_stack->pop_restofstack(1);
269                         if (diverge == earliest_diverge) {
270                                 earliest_diverge = prevnode->get_action();
271                         }
272                 }
273                 /* Start the round robin scheduler from this thread id */
274                 scheduler->set_scheduler_thread(tid);
275                 /* The correct sleep set is in the parent node. */
276                 execute_sleep_set();
277
278                 DEBUG("*** Divergence point ***\n");
279
280                 diverge = NULL;
281         } else {
282                 tid = next->get_tid();
283         }
284         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
285         ASSERT(tid != THREAD_ID_T_NONE);
286         return get_thread(id_to_int(tid));
287 }
288
289 /**
290  * We need to know what the next actions of all threads in the sleep
291  * set will be.  This method computes them and stores each action as the
292  * corresponding Thread object's pending action.
293  */
294
295 void ModelChecker::execute_sleep_set()
296 {
297         for (unsigned int i = 0; i < get_num_threads(); i++) {
298                 thread_id_t tid = int_to_id(i);
299                 Thread *thr = get_thread(tid);
300                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
301                         thr->get_pending()->set_sleep_flag();
302                 }
303         }
304 }
305
306 /**
307  * @brief Should the current action wake up a given thread?
308  *
309  * @param curr The current action
310  * @param thread The thread that we might wake up
311  * @return True, if we should wake up the sleeping thread; false otherwise
312  */
313 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
314 {
315         const ModelAction *asleep = thread->get_pending();
316         /* Don't allow partial RMW to wake anyone up */
317         if (curr->is_rmwr())
318                 return false;
319         /* Synchronizing actions may have been backtracked */
320         if (asleep->could_synchronize_with(curr))
321                 return true;
322         /* All acquire/release fences and fence-acquire/store-release */
323         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
324                 return true;
325         /* Fence-release + store can awake load-acquire on the same location */
326         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
327                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
328                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
329                         return true;
330         }
331         return false;
332 }
333
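/**
 * @brief Remove from the sleep set any threads that the current action may
 * wake up
 * @param curr The current action
 */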
334 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
335 {
336         for (unsigned int i = 0; i < get_num_threads(); i++) {
337                 Thread *thr = get_thread(int_to_id(i));
338                 if (scheduler->is_sleep_set(thr)) {
339                         if (should_wake_up(curr, thr))
340                                 /* Remove this thread from sleep set */
341                                 scheduler->remove_sleep(thr);
342                 }
343         }
344 }
345
346 /** @brief Alert the model-checker that an incorrectly-ordered
347  * synchronization was made */
348 void ModelChecker::set_bad_synchronization()
349 {
350         priv->bad_synchronization = true;
351 }
352
353 /**
354  * Check whether the current trace has triggered an assertion which should halt
355  * its execution.
356  *
357  * @return True, if the execution should be aborted; false otherwise
358  */
359 bool ModelChecker::has_asserted() const
360 {
361         return priv->asserted;
362 }
363
364 /**
365  * Trigger a trace assertion which should cause this execution to be halted.
366  * This can be due to a detected bug or due to an infeasibility that should
367  * halt ASAP.
368  */
369 void ModelChecker::set_assert()
370 {
371         priv->asserted = true;
372 }
373
374 /**
375  * Check if we are in a deadlock. Should only be called at the end of an
376  * execution, although it should not give false positives in the middle of an
377  * execution (there should be some ENABLED thread).
378  *
379  * @return True if program is in a deadlock; false otherwise
380  */
381 bool ModelChecker::is_deadlocked() const
382 {
383         bool blocking_threads = false;
384         for (unsigned int i = 0; i < get_num_threads(); i++) {
385                 thread_id_t tid = int_to_id(i);
386                 if (is_enabled(tid))
387                         return false;
388                 Thread *t = get_thread(tid);
389                 if (!t->is_model_thread() && t->get_pending())
390                         blocking_threads = true;
391         }
392         return blocking_threads;
393 }
394
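/*
 * For illustration, the classic case is_deadlocked() catches is two threads
 * that each hold one std::mutex while blocking on the other (mutex names are
 * hypothetical):
 *
 *   Thread 1: m1.lock(); m2.lock();
 *   Thread 2: m2.lock(); m1.lock();
 *
 * At the end of such an execution no thread is enabled, yet both threads
 * still have a pending lock action, so is_deadlocked() returns true.
 */
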
395 /**
396  * Check if this is a complete execution. That is, have all threads completed
397  * execution (rather than exiting because sleep sets have forced a redundant
398  * execution).
399  *
400  * @return True if the execution is complete.
401  */
402 bool ModelChecker::is_complete_execution() const
403 {
404         for (unsigned int i = 0; i < get_num_threads(); i++)
405                 if (is_enabled(int_to_id(i)))
406                         return false;
407         return true;
408 }
409
410 /**
411  * @brief Assert a bug in the executing program.
412  *
413  * Use this function to assert any sort of bug in the user program. If the
414  * current trace is feasible (actually, a prefix of some feasible execution),
415  * then this execution will be aborted, printing the appropriate message. If
416  * the current trace is not yet feasible, the error message will be stashed and
417  * printed if the execution ever becomes feasible.
418  *
419  * @param msg Descriptive message for the bug (do not include newline char)
420  * @return True if bug is immediately-feasible
421  */
422 bool ModelChecker::assert_bug(const char *msg, ...)
423 {
424         char str[800];
425
426         va_list ap;
427         va_start(ap, msg);
428         vsnprintf(str, sizeof(str), msg, ap);
429         va_end(ap);
430
431         priv->bugs.push_back(new bug_message(str));
432
433         if (isfeasibleprefix()) {
434                 set_assert();
435                 return true;
436         }
437         return false;
438 }
439
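/*
 * For illustration, assert_bug() takes a printf-style format string, so a
 * check inside the model checker might look like the following (the names
 * `actual` and `expected` are hypothetical):
 *
 *   if (actual != expected)
 *           assert_bug("atomic load returned %d, expected %d",
 *                           actual, expected);
 *
 * User threads report bugs through assert_user_bug() below, which switches
 * back to the model-checker when the bug is immediately feasible.
 */
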
440 /**
441  * @brief Assert a bug in the executing program, asserted by a user thread
442  * @see ModelChecker::assert_bug
443  * @param msg Descriptive message for the bug (do not include newline char)
444  */
445 void ModelChecker::assert_user_bug(const char *msg)
446 {
447         /* If feasible bug, bail out now */
448         if (assert_bug(msg))
449                 switch_to_master(NULL);
450 }
451
452 /** @return True, if any bugs have been reported for this execution */
453 bool ModelChecker::have_bug_reports() const
454 {
455         return priv->bugs.size() != 0;
456 }
457
458 /** @brief Print bug report listing for this execution (if any bugs exist) */
459 void ModelChecker::print_bugs() const
460 {
461         if (have_bug_reports()) {
462                 model_print("Bug report: %zu bug%s detected\n",
463                                 priv->bugs.size(),
464                                 priv->bugs.size() > 1 ? "s" : "");
465                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
466                         priv->bugs[i]->print();
467         }
468 }
469
470 /**
471  * @brief Record end-of-execution stats
472  *
473  * Must be run when exiting an execution. Records various stats.
474  * @see struct execution_stats
475  */
476 void ModelChecker::record_stats()
477 {
478         stats.num_total++;
479         if (!isfeasibleprefix())
480                 stats.num_infeasible++;
481         else if (have_bug_reports())
482                 stats.num_buggy_executions++;
483         else if (is_complete_execution())
484                 stats.num_complete++;
485         else {
486                 stats.num_redundant++;
487
488                 /**
489                  * @todo We can violate this ASSERT() when fairness/sleep sets
490                  * conflict to cause an execution to terminate, e.g. with:
491                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
492                  */
493                 //ASSERT(scheduler->all_threads_sleeping());
494         }
495 }
496
497 /** @brief Print execution stats */
498 void ModelChecker::print_stats() const
499 {
500         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
501         model_print("Number of redundant executions: %d\n", stats.num_redundant);
502         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
503         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
504         model_print("Total executions: %d\n", stats.num_total);
505         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
506 }
507
508 /**
509  * @brief End-of-execution print
510  * @param printbugs Should any existing bugs be printed?
511  */
512 void ModelChecker::print_execution(bool printbugs) const
513 {
514         print_program_output();
515
516         if (params.verbose) {
517                 model_print("Earliest divergence point since last feasible execution:\n");
518                 if (earliest_diverge)
519                         earliest_diverge->print();
520                 else
521                         model_print("(Not set)\n");
522
523                 model_print("\n");
524                 print_stats();
525         }
526
527         /* Don't print invalid bugs */
528         if (printbugs)
529                 print_bugs();
530
531         model_print("\n");
532         print_summary();
533 }
534
535 /**
536  * Queries the model-checker for more executions to explore and, if one
537  * exists, resets the model-checker state to execute a new execution.
538  *
539  * @return If there are more executions to explore, return true. Otherwise,
540  * return false.
541  */
542 bool ModelChecker::next_execution()
543 {
544         DBG();
545         /* Is this execution a feasible execution that's worth bug-checking? */
546         bool complete = isfeasibleprefix() && (is_complete_execution() ||
547                         have_bug_reports());
548
549         /* End-of-execution bug checks */
550         if (complete) {
551                 if (is_deadlocked())
552                         assert_bug("Deadlock detected");
553
554                 checkDataRaces();
555                 run_trace_analyses();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
580
581 /** @brief Run trace analyses on complete trace */
582 void ModelChecker::run_trace_analyses() {
583         for (unsigned int i = 0; i < trace_analyses->size(); i++)
584                 (*trace_analyses)[i]->analyze(action_trace);
585 }
586
587 /**
588  * @brief Find the last fence-related backtracking conflict for a ModelAction
589  *
590  * This function performs the search for the most recent conflicting action
591  * against which we should perform backtracking, as affected by fence
592  * operations. This includes pairs of potentially-synchronizing actions which
593  * occur due to fence-acquire or fence-release, and hence should be explored in
594  * the opposite execution order.
595  *
596  * @param act The current action
597  * @return The most recent action which conflicts with act due to fences
598  */
599 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
600 {
601         /* Only perform release/acquire fence backtracking for stores */
602         if (!act->is_write())
603                 return NULL;
604
605         /* Find a fence-release (or, act is a release) */
606         ModelAction *last_release;
607         if (act->is_release())
608                 last_release = act;
609         else
610                 last_release = get_last_fence_release(act->get_tid());
611         if (!last_release)
612                 return NULL;
613
614         /* Skip past the release */
615         action_list_t *list = action_trace;
616         action_list_t::reverse_iterator rit;
617         for (rit = list->rbegin(); rit != list->rend(); rit++)
618                 if (*rit == last_release)
619                         break;
620         ASSERT(rit != list->rend());
621
622         /* Find a prior:
623          *   load-acquire
624          * or
625          *   load --sb-> fence-acquire */
626         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
627         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
628         bool found_acquire_fences = false;
629         for ( ; rit != list->rend(); rit++) {
630                 ModelAction *prev = *rit;
631                 if (act->same_thread(prev))
632                         continue;
633
634                 int tid = id_to_int(prev->get_tid());
635
636                 if (prev->is_read() && act->same_var(prev)) {
637                         if (prev->is_acquire()) {
638                                 /* Found most recent load-acquire, don't need
639                                  * to search for more fences */
640                                 if (!found_acquire_fences)
641                                         return NULL;
642                         } else {
643                                 prior_loads[tid] = prev;
644                         }
645                 }
646                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
647                         found_acquire_fences = true;
648                         acquire_fences[tid] = prev;
649                 }
650         }
651
652         ModelAction *latest_backtrack = NULL;
653         for (unsigned int i = 0; i < acquire_fences.size(); i++)
654                 if (acquire_fences[i] && prior_loads[i])
655                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
656                                 latest_backtrack = acquire_fences[i];
657         return latest_backtrack;
658 }
659
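/*
 * A sketch of the pattern searched for above (thread layout and location x
 * are hypothetical; earlier trace entries appear higher):
 *
 *   Thread 1                                  Thread 2
 *   --------                                  --------
 *                                             r = x.load(memory_order_relaxed);
 *                                             atomic_thread_fence(memory_order_acquire);
 *   atomic_thread_fence(memory_order_release);
 *   x.store(1, memory_order_relaxed);         <-- act
 *
 * Had the store been ordered before the load, the relaxed load could have read
 * from it and the acquire fence would synchronize with the release fence, so
 * the acquire fence is reported as a backtracking conflict for act.
 */
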
660 /**
661  * @brief Find the last backtracking conflict for a ModelAction
662  *
663  * This function performs the search for the most recent conflicting action
664  * against which we should perform backtracking. This primarily includes pairs of
665  * synchronizing actions which should be explored in the opposite execution
666  * order.
667  *
668  * @param act The current action
669  * @return The most recent action which conflicts with act
670  */
671 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
672 {
673         switch (act->get_type()) {
674         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
675         case ATOMIC_READ:
676         case ATOMIC_WRITE:
677         case ATOMIC_RMW: {
678                 ModelAction *ret = NULL;
679
680                 /* linear search: from most recent to oldest */
681                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
682                 action_list_t::reverse_iterator rit;
683                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
684                         ModelAction *prev = *rit;
685                         if (prev->could_synchronize_with(act)) {
686                                 ret = prev;
687                                 break;
688                         }
689                 }
690
691                 ModelAction *ret2 = get_last_fence_conflict(act);
692                 if (!ret2)
693                         return ret;
694                 if (!ret)
695                         return ret2;
696                 if (*ret < *ret2)
697                         return ret2;
698                 return ret;
699         }
700         case ATOMIC_LOCK:
701         case ATOMIC_TRYLOCK: {
702                 /* linear search: from most recent to oldest */
703                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
704                 action_list_t::reverse_iterator rit;
705                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
706                         ModelAction *prev = *rit;
707                         if (act->is_conflicting_lock(prev))
708                                 return prev;
709                 }
710                 break;
711         }
712         case ATOMIC_UNLOCK: {
713                 /* linear search: from most recent to oldest */
714                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
715                 action_list_t::reverse_iterator rit;
716                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
717                         ModelAction *prev = *rit;
718                         if (!act->same_thread(prev) && prev->is_failed_trylock())
719                                 return prev;
720                 }
721                 break;
722         }
723         case ATOMIC_WAIT: {
724                 /* linear search: from most recent to oldest */
725                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
726                 action_list_t::reverse_iterator rit;
727                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
728                         ModelAction *prev = *rit;
729                         if (!act->same_thread(prev) && prev->is_failed_trylock())
730                                 return prev;
731                         if (!act->same_thread(prev) && prev->is_notify())
732                                 return prev;
733                 }
734                 break;
735         }
736
737         case ATOMIC_NOTIFY_ALL:
738         case ATOMIC_NOTIFY_ONE: {
739                 /* linear search: from most recent to oldest */
740                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
741                 action_list_t::reverse_iterator rit;
742                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
743                         ModelAction *prev = *rit;
744                         if (!act->same_thread(prev) && prev->is_wait())
745                                 return prev;
746                 }
747                 break;
748         }
749         default:
750                 break;
751         }
752         return NULL;
753 }
754
755 /** This method finds backtracking points at which we should try to
756  * reorder the parameter ModelAction against earlier conflicting actions.
757  *
758  * @param act The ModelAction for which to find backtracking points.
759  */
760 void ModelChecker::set_backtracking(ModelAction *act)
761 {
762         Thread *t = get_thread(act);
763         ModelAction *prev = get_last_conflict(act);
764         if (prev == NULL)
765                 return;
766
767         Node *node = prev->get_node()->get_parent();
768
769         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
770         int low_tid, high_tid;
771         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
772                 low_tid = id_to_int(act->get_tid());
773                 high_tid = low_tid + 1;
774         } else {
775                 low_tid = 0;
776                 high_tid = get_num_threads();
777         }
778
779         for (int i = low_tid; i < high_tid; i++) {
780                 thread_id_t tid = int_to_id(i);
781
782                 /* Make sure this thread can be enabled here. */
783                 if (i >= node->get_num_threads())
784                         break;
785
786                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
787                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
788                 if (node->enabled_status(tid) != THREAD_ENABLED)
789                         continue;
790
791                 /* Check if this has been explored already */
792                 if (node->has_been_explored(tid))
793                         continue;
794
795                 /* See if fairness allows */
796                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
797                         bool unfair = false;
798                         for (int t = 0; t < node->get_num_threads(); t++) {
799                                 thread_id_t tother = int_to_id(t);
800                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
801                                         unfair = true;
802                                         break;
803                                 }
804                         }
805                         if (unfair)
806                                 continue;
807                 }
808
809                 /* See if CHESS-like yield fairness allows */
810                 if (model->params.yieldon) {
811                         bool unfair = false;
812                         for (int t = 0; t < node->get_num_threads(); t++) {
813                                 thread_id_t tother = int_to_id(t);
814                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
815                                         unfair = true;
816                                         break;
817                                 }
818                         }
819                         if (unfair)
820                                 continue;
821                 }
822                 
823                 /* Cache the latest backtracking point */
824                 set_latest_backtrack(prev);
825
826                 /* If this is a new backtracking point, mark the tree */
827                 if (!node->set_backtrack(tid))
828                         continue;
829                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
830                                         id_to_int(prev->get_tid()),
831                                         id_to_int(t->get_id()));
832                 if (DBG_ENABLED()) {
833                         prev->print();
834                         act->print();
835                 }
836         }
837 }
838
839 /**
840  * @brief Cache a backtracking point as the "most recent", if eligible
841  *
842  * Note that this does not prepare the NodeStack for this backtracking
843  * operation; it only caches the action on a per-execution basis.
844  *
845  * @param act The operation at which we should explore a different next action
846  * (i.e., backtracking point)
847  * @return True, if this action is now the most recent backtracking point;
848  * false otherwise
849  */
850 bool ModelChecker::set_latest_backtrack(ModelAction *act)
851 {
852         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
853                 priv->next_backtrack = act;
854                 return true;
855         }
856         return false;
857 }
858
859 /**
860  * Returns last backtracking point. The model checker will explore a different
861  * path for this point in the next execution.
862  * @return The ModelAction at which the next execution should diverge.
863  */
864 ModelAction * ModelChecker::get_next_backtrack()
865 {
866         ModelAction *next = priv->next_backtrack;
867         priv->next_backtrack = NULL;
868         return next;
869 }
870
871 /**
872  * Processes a read model action.
873  * @param curr is the read model action to process.
874  * @return True if processing this read updates the mo_graph.
875  */
876 bool ModelChecker::process_read(ModelAction *curr)
877 {
878         Node *node = curr->get_node();
879         while (true) {
880                 bool updated = false;
881                 switch (node->get_read_from_status()) {
882                 case READ_FROM_PAST: {
883                         const ModelAction *rf = node->get_read_from_past();
884                         ASSERT(rf);
885
886                         mo_graph->startChanges();
887
888                         ASSERT(!is_infeasible());
889                         if (!check_recency(curr, rf)) {
890                                 if (node->increment_read_from()) {
891                                         mo_graph->rollbackChanges();
892                                         continue;
893                                 } else {
894                                         priv->too_many_reads = true;
895                                 }
896                         }
897
898                         updated = r_modification_order(curr, rf);
899                         read_from(curr, rf);
900                         mo_graph->commitChanges();
901                         mo_check_promises(curr, true);
902                         break;
903                 }
904                 case READ_FROM_PROMISE: {
905                         Promise *promise = curr->get_node()->get_read_from_promise();
906                         if (promise->add_reader(curr))
907                                 priv->failed_promise = true;
908                         curr->set_read_from_promise(promise);
909                         mo_graph->startChanges();
910                         if (!check_recency(curr, promise))
911                                 priv->too_many_reads = true;
912                         updated = r_modification_order(curr, promise);
913                         mo_graph->commitChanges();
914                         break;
915                 }
916                 case READ_FROM_FUTURE: {
917                         /* Read from future value */
918                         struct future_value fv = node->get_future_value();
919                         Promise *promise = new Promise(curr, fv);
920                         curr->set_read_from_promise(promise);
921                         promises->push_back(promise);
922                         mo_graph->startChanges();
923                         updated = r_modification_order(curr, promise);
924                         mo_graph->commitChanges();
925                         break;
926                 }
927                 default:
928                         ASSERT(false);
929                 }
930                 get_thread(curr)->set_return_value(curr->get_return_value());
931                 return updated;
932         }
933 }
934
935 /**
936  * Processes a lock, trylock, unlock, wait, or notify model action.
937  * @param curr The mutex-related model action to process.
938  *
939  * The trylock operation checks whether the lock is taken.  If not,
940  * it falls through to the normal lock case.  If so, it fails and the
941  * thread's return value is set to 0.
942  *
943  * The lock operation has already been checked that it is enabled, so
944  * it just grabs the lock and synchronizes with the previous unlock.
945  *
946  * The unlock operation has to re-enable all of the threads that are
947  * waiting on the lock.
948  *
949  * @return True if synchronization was updated; false otherwise
950  */
951 bool ModelChecker::process_mutex(ModelAction *curr)
952 {
953         std::mutex *mutex = curr->get_mutex();
954         struct std::mutex_state *state = NULL;
955
956         if (mutex)
957                 state = mutex->get_state();
958
959         switch (curr->get_type()) {
960         case ATOMIC_TRYLOCK: {
961                 bool success = !state->locked;
962                 curr->set_try_lock(success);
963                 if (!success) {
964                         get_thread(curr)->set_return_value(0);
965                         break;
966                 }
967                 get_thread(curr)->set_return_value(1);
968         }
969                 //otherwise fall through to the lock case
970         case ATOMIC_LOCK: {
971                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
972                         assert_bug("Lock access before initialization");
973                 state->locked = get_thread(curr);
974                 ModelAction *unlock = get_last_unlock(curr);
975                 //synchronize with the previous unlock statement
976                 if (unlock != NULL) {
977                         synchronize(unlock, curr);
978                         return true;
979                 }
980                 break;
981         }
982         case ATOMIC_WAIT:
983         case ATOMIC_UNLOCK: {
984                 /* wake up the other threads */
985                 for (unsigned int i = 0; i < get_num_threads(); i++) {
986                         Thread *t = get_thread(int_to_id(i));
987                         Thread *curr_thrd = get_thread(curr);
988                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
989                                 scheduler->wake(t);
990                 }
991
992                 /* unlock the lock - after checking who was waiting on it */
993                 state->locked = NULL;
994
995                 if (!curr->is_wait())
996                         break; /* The rest is only for ATOMIC_WAIT */
997
998                 /* Should we go to sleep? (simulate spurious failures) */
999                 if (curr->get_node()->get_misc() == 0) {
1000                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1001                         /* disable us */
1002                         scheduler->sleep(get_thread(curr));
1003                 }
1004                 break;
1005         }
1006         case ATOMIC_NOTIFY_ALL: {
1007                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1008                 //activate all the waiting threads
1009                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1010                         scheduler->wake(get_thread(*rit));
1011                 }
1012                 waiters->clear();
1013                 break;
1014         }
1015         case ATOMIC_NOTIFY_ONE: {
1016                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1017                 int wakeupthread = curr->get_node()->get_misc();
1018                 action_list_t::iterator it = waiters->begin();
1019                 advance(it, wakeupthread);
1020                 scheduler->wake(get_thread(*it));
1021                 waiters->erase(it);
1022                 break;
1023         }
1024
1025         default:
1026                 ASSERT(0);
1027         }
1028         return false;
1029 }
1030
1031 /**
1032  * @brief Check if the current pending promises allow a future value to be sent
1033  *
1034  * If one of the following is true:
1035  *  (a) there are no pending promises
1036  *  (b) the reader and writer do not cross any promises
1037  * Then, it is safe to pass a future value back now.
1038  *
1039  * Otherwise, we must save the pending future value until (a) or (b) is true
1040  *
1041  * @param writer The operation which sends the future value. Must be a write.
1042  * @param reader The operation which will observe the value. Must be a read.
1043  * @return True if the future value can be sent now; false if it must wait.
1044  */
1045 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1046                 const ModelAction *reader) const
1047 {
1048         if (promises->empty())
1049                 return true;
1050         for (int i = promises->size() - 1; i >= 0; i--) {
1051                 ModelAction *pr = (*promises)[i]->get_reader(0);
1052                 //reader is after promise...doesn't cross any promise
1053                 if (*reader > *pr)
1054                         return true;
1055                 //writer is after promise, reader before...bad...
1056                 if (*writer > *pr)
1057                         return false;
1058         }
1059         return true;
1060 }
1061
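/*
 * In summary, for each pending promise (newest first) with first reader P,
 * the check above resolves the (writer, reader) pair as follows:
 *
 *   reader ordered after P               -> return true  (cannot cross P)
 *   reader before P but writer after P   -> return false (the pair straddles P)
 *   both before P                        -> examine the next older promise
 *
 * Values that must wait are queued as PendingFutureValue entries and retried
 * from process_write() once no promise blocks them.
 */
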
1062 /**
1063  * @brief Add a future value to a reader
1064  *
1065  * This function performs a few additional checks to ensure that the future
1066  * value can be feasibly observed by the reader
1067  *
1068  * @param writer The operation whose value is sent. Must be a write.
1069  * @param reader The read operation which may read the future value. Must be a read.
1070  */
1071 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1072 {
1073         /* Do more ambitious checks now that mo is more complete */
1074         if (!mo_may_allow(writer, reader))
1075                 return;
1076
1077         Node *node = reader->get_node();
1078
1079         /* Find an ancestor thread which exists at the time of the reader */
1080         Thread *write_thread = get_thread(writer);
1081         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1082                 write_thread = write_thread->get_parent();
1083
1084         struct future_value fv = {
1085                 writer->get_write_value(),
1086                 writer->get_seq_number() + params.maxfuturedelay,
1087                 write_thread->get_id(),
1088         };
1089         if (node->add_future_value(fv))
1090                 set_latest_backtrack(reader);
1091 }
1092
1093 /**
1094  * Process a write ModelAction
1095  * @param curr The ModelAction to process
1096  * @return True if the mo_graph was updated or promises were resolved
1097  */
1098 bool ModelChecker::process_write(ModelAction *curr)
1099 {
1100         /* Readers to which we may send our future value */
1101         ModelVector<ModelAction *> send_fv;
1102
1103         const ModelAction *earliest_promise_reader;
1104         bool updated_promises = false;
1105
1106         bool updated_mod_order = w_modification_order(curr, &send_fv);
1107         Promise *promise = pop_promise_to_resolve(curr);
1108
1109         if (promise) {
1110                 earliest_promise_reader = promise->get_reader(0);
1111                 updated_promises = resolve_promise(curr, promise);
1112         } else
1113                 earliest_promise_reader = NULL;
1114
1115         for (unsigned int i = 0; i < send_fv.size(); i++) {
1116                 ModelAction *read = send_fv[i];
1117
1118                 /* Don't send future values to reads after the Promise we resolve */
1119                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1120                         /* Check if future value can be sent immediately */
1121                         if (promises_may_allow(curr, read)) {
1122                                 add_future_value(curr, read);
1123                         } else {
1124                                 futurevalues->push_back(PendingFutureValue(curr, read));
1125                         }
1126                 }
1127         }
1128
1129         /* Check the pending future values */
1130         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1131                 struct PendingFutureValue pfv = (*futurevalues)[i];
1132                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1133                         add_future_value(pfv.writer, pfv.reader);
1134                         futurevalues->erase(futurevalues->begin() + i);
1135                 }
1136         }
1137
1138         mo_graph->commitChanges();
1139         mo_check_promises(curr, false);
1140
1141         get_thread(curr)->set_return_value(VALUE_NONE);
1142         return updated_mod_order || updated_promises;
1143 }
1144
1145 /**
1146  * Process a fence ModelAction
1147  * @param curr The ModelAction to process
1148  * @return True if synchronization was updated
1149  */
1150 bool ModelChecker::process_fence(ModelAction *curr)
1151 {
1152         /*
1153          * fence-relaxed: no-op
1154  * fence-release: only log the occurrence (not in this function), for
1155          *   use in later synchronization
1156          * fence-acquire (this function): search for hypothetical release
1157          *   sequences
1158          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1159          */
1160         bool updated = false;
1161         if (curr->is_acquire()) {
1162                 action_list_t *list = action_trace;
1163                 action_list_t::reverse_iterator rit;
1164                 /* Find X : is_read(X) && X --sb-> curr */
1165                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1166                         ModelAction *act = *rit;
1167                         if (act == curr)
1168                                 continue;
1169                         if (act->get_tid() != curr->get_tid())
1170                                 continue;
1171                         /* Stop at the beginning of the thread */
1172                         if (act->is_thread_start())
1173                                 break;
1174                         /* Stop once we reach a prior fence-acquire */
1175                         if (act->is_fence() && act->is_acquire())
1176                                 break;
1177                         if (!act->is_read())
1178                                 continue;
1179                         /* read-acquire will find its own release sequences */
1180                         if (act->is_acquire())
1181                                 continue;
1182
1183                         /* Establish hypothetical release sequences */
1184                         rel_heads_list_t release_heads;
1185                         get_release_seq_heads(curr, act, &release_heads);
1186                         for (unsigned int i = 0; i < release_heads.size(); i++)
1187                                 synchronize(release_heads[i], curr);
1188                         if (release_heads.size() != 0)
1189                                 updated = true;
1190                 }
1191         }
1192         return updated;
1193 }
1194
1195 /**
1196  * @brief Process the current action for thread-related activity
1197  *
1198  * Performs current-action processing for a THREAD_* ModelAction. Processing
1199  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1200  * synchronization, etc.  This function is a no-op for non-THREAD actions
1201  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1202  *
1203  * @param curr The current action
1204  * @return True if synchronization was updated or a thread completed
1205  */
1206 bool ModelChecker::process_thread_action(ModelAction *curr)
1207 {
1208         bool updated = false;
1209
1210         switch (curr->get_type()) {
1211         case THREAD_CREATE: {
1212                 thrd_t *thrd = (thrd_t *)curr->get_location();
1213                 struct thread_params *params = (struct thread_params *)curr->get_value();
1214                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1215                 add_thread(th);
1216                 th->set_creation(curr);
1217                 /* Promises can be satisfied by children */
1218                 for (unsigned int i = 0; i < promises->size(); i++) {
1219                         Promise *promise = (*promises)[i];
1220                         if (promise->thread_is_available(curr->get_tid()))
1221                                 promise->add_thread(th->get_id());
1222                 }
1223                 break;
1224         }
1225         case THREAD_JOIN: {
1226                 Thread *blocking = curr->get_thread_operand();
1227                 ModelAction *act = get_last_action(blocking->get_id());
1228                 synchronize(act, curr);
1229                 updated = true; /* trigger rel-seq checks */
1230                 break;
1231         }
1232         case THREAD_FINISH: {
1233                 Thread *th = get_thread(curr);
1234                 /* Wake up any joining threads */
1235                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1236                         Thread *waiting = get_thread(int_to_id(i));
1237                         if (waiting->waiting_on() == th &&
1238                                         waiting->get_pending()->is_thread_join())
1239                                 scheduler->wake(waiting);
1240                 }
1241                 th->complete();
1242                 /* Completed thread can't satisfy promises */
1243                 for (unsigned int i = 0; i < promises->size(); i++) {
1244                         Promise *promise = (*promises)[i];
1245                         if (promise->thread_is_available(th->get_id()))
1246                                 if (promise->eliminate_thread(th->get_id()))
1247                                         priv->failed_promise = true;
1248                 }
1249                 updated = true; /* trigger rel-seq checks */
1250                 break;
1251         }
1252         case THREAD_START: {
1253                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1254                 break;
1255         }
1256         default:
1257                 break;
1258         }
1259
1260         return updated;
1261 }
1262
1263 /**
1264  * @brief Process the current action for release sequence fixup activity
1265  *
1266  * Performs model-checker release sequence fixups for the current action,
1267  * forcing a single pending release sequence to break (with a given, potential
1268  * "loose" write) or to complete (i.e., synchronize). If a pending release
1269  * sequence forms a complete release sequence, then we must perform the fixup
1270  * synchronization, mo_graph additions, etc.
1271  *
1272  * @param curr The current action; must be a release sequence fixup action
1273  * @param work_queue The work queue to which to add work items as they are
1274  * generated
1275  */
1276 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1277 {
1278         const ModelAction *write = curr->get_node()->get_relseq_break();
1279         struct release_seq *sequence = pending_rel_seqs->back();
1280         pending_rel_seqs->pop_back();
1281         ASSERT(sequence);
1282         ModelAction *acquire = sequence->acquire;
1283         const ModelAction *rf = sequence->rf;
1284         const ModelAction *release = sequence->release;
1285         ASSERT(acquire);
1286         ASSERT(release);
1287         ASSERT(rf);
1288         ASSERT(release->same_thread(rf));
1289
1290         if (write == NULL) {
1291                 /**
1292                  * @todo Forcing a synchronization requires that we set
1293                  * modification order constraints. For instance, we can't allow
1294                  * a fixup sequence in which two separate read-acquire
1295                  * operations read from the same sequence, where the first one
1296                  * synchronizes and the other doesn't. Essentially, we can't
1297                  * allow any writes to insert themselves between 'release' and
1298                  * 'rf'
1299                  */
1300
1301                 /* Must synchronize */
1302                 if (!synchronize(release, acquire))
1303                         return;
1304                 /* Re-check all pending release sequences */
1305                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1306                 /* Re-check act for mo_graph edges */
1307                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1308
1309                 /* propagate synchronization to later actions */
1310                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1311                 for (; (*rit) != acquire; rit++) {
1312                         ModelAction *propagate = *rit;
1313                         if (acquire->happens_before(propagate)) {
1314                                 synchronize(acquire, propagate);
1315                                 /* Re-check 'propagate' for mo_graph edges */
1316                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1317                         }
1318                 }
1319         } else {
1320                 /* Break release sequence with new edges:
1321                  *   release --mo--> write --mo--> rf */
1322                 mo_graph->addEdge(release, write);
1323                 mo_graph->addEdge(write, rf);
1324         }
1325
1326         /* See if we have realized a data race */
1327         checkDataRaces();
1328 }
1329
1330 /**
1331  * Initialize the current action by performing one or more of the following
1332  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1333  * in the NodeStack, manipulating backtracking sets, allocating and
1334  * initializing clock vectors, and computing the promises to fulfill.
1335  *
1336  * @param curr The current action, as passed from the user context; may be
1337  * freed/invalidated after the execution of this function, with a different
1338  * action "returned" in its place (pass-by-reference)
1339  * @return True if curr is a newly-explored action; false otherwise
1340  */
1341 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1342 {
1343         ModelAction *newcurr;
1344
1345         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1346                 newcurr = process_rmw(*curr);
1347                 delete *curr;
1348
1349                 if (newcurr->is_rmw())
1350                         compute_promises(newcurr);
1351
1352                 *curr = newcurr;
1353                 return false;
1354         }
1355
1356         (*curr)->set_seq_number(get_next_seq_num());
1357
1358         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1359         if (newcurr) {
1360                 /* First restore type and order in case of RMW operation */
1361                 if ((*curr)->is_rmwr())
1362                         newcurr->copy_typeandorder(*curr);
1363
1364                 ASSERT((*curr)->get_location() == newcurr->get_location());
1365                 newcurr->copy_from_new(*curr);
1366
1367                 /* Discard duplicate ModelAction; use action from NodeStack */
1368                 delete *curr;
1369
1370                 /* Always compute new clock vector */
1371                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1372
1373                 *curr = newcurr;
1374                 return false; /* Action was explored previously */
1375         } else {
1376                 newcurr = *curr;
1377
1378                 /* Always compute new clock vector */
1379                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1380
1381                 /* Assign most recent release fence */
1382                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1383
1384                 /*
1385                  * Perform one-time actions when pushing new ModelAction onto
1386                  * NodeStack
1387                  */
1388                 if (newcurr->is_write())
1389                         compute_promises(newcurr);
1390                 else if (newcurr->is_relseq_fixup())
1391                         compute_relseq_breakwrites(newcurr);
1392                 else if (newcurr->is_wait())
1393                         newcurr->get_node()->set_misc_max(2);
1394                 else if (newcurr->is_notify_one()) {
1395                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1396                 }
1397                 return true; /* This was a new ModelAction */
1398         }
1399 }
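
/*
 * Illustrative note (hypothetical example, not part of the checker): a single
 * user-level RMW such as
 *
 *   std::atomic<int> x(0);
 *   int expected = 0;
 *   x.compare_exchange_strong(expected, 1, std::memory_order_acq_rel);
 *
 * reaches the checker as a read part (RMWR) followed by either a write part
 * (RMW) on success or a "cancel" part (RMWC) on failure; the first branch
 * above merges that second half back into the stored RMWR via process_rmw().
 */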
1400
1401 /**
1402  * @brief Establish reads-from relation between two actions
1403  *
1404  * Perform basic operations involved with establishing a concrete rf relation,
1405  * including setting the ModelAction data and checking for release sequences.
1406  *
1407  * @param act The action that is reading (must be a read)
1408  * @param rf The action from which we are reading (must be a write)
1409  *
1410  * @return True if this read established synchronization
1411  */
1412 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1413 {
1414         ASSERT(rf);
1415         ASSERT(rf->is_write());
1416
1417         act->set_read_from(rf);
1418         if (act->is_acquire()) {
1419                 rel_heads_list_t release_heads;
1420                 get_release_seq_heads(act, act, &release_heads);
1421                 int num_heads = release_heads.size();
1422                 for (unsigned int i = 0; i < release_heads.size(); i++)
1423                         if (!synchronize(release_heads[i], act))
1424                                 num_heads--;
1425                 return num_heads > 0;
1426         }
1427         return false;
1428 }
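
/*
 * Illustrative sketch (hypothetical user test, not part of the checker): the
 * rf relation established here is what turns into synchronization.  If the
 * acquire load below reads-from the release store, read_from() locates the
 * release head and synchronize() gives the reader the writer's clock vector,
 * so the plain access to 'data' is ordered after the plain write.
 *
 *   #include <atomic>
 *   #include <cassert>
 *
 *   std::atomic<int> flag(0);
 *   int data = 0;
 *
 *   void writer() {                 // run in one thread
 *           data = 42;
 *           flag.store(1, std::memory_order_release);
 *   }
 *   void reader() {                 // run in another thread
 *           if (flag.load(std::memory_order_acquire) == 1)
 *                   assert(data == 42);
 *   }
 */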
1429
1430 /**
1431  * @brief Synchronizes two actions
1432  *
1433  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1434  * This function performs the synchronization and also provides hooks for
1435  * other checks that accompany synchronization.
1436  *
1437  * @param first The left-hand side of the synchronizes-with relation
1438  * @param second The right-hand side of the synchronizes-with relation
1439  * @return True if the synchronization was successful (i.e., was consistent
1440  * with the execution order); false otherwise
1441  */
1442 bool ModelChecker::synchronize(const ModelAction *first, ModelAction *second)
1443 {
1444         if (*second < *first) {
1445                 set_bad_synchronization();
1446                 return false;
1447         }
1448         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1449         return second->synchronize_with(first);
1450 }
1451
1452 /**
1453  * Check promises and eliminate potentially-satisfying threads when a thread is
1454  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1455  * no longer satisfy a promise generated from that thread.
1456  *
1457  * @param blocker The thread on which a thread is waiting
1458  * @param waiting The waiting thread
1459  */
1460 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1461 {
1462         for (unsigned int i = 0; i < promises->size(); i++) {
1463                 Promise *promise = (*promises)[i];
1464                 if (!promise->thread_is_available(waiting->get_id()))
1465                         continue;
1466                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1467                         ModelAction *reader = promise->get_reader(j);
1468                         if (reader->get_tid() != blocker->get_id())
1469                                 continue;
1470                         if (promise->eliminate_thread(waiting->get_id())) {
1471                                 /* Promise has failed */
1472                                 priv->failed_promise = true;
1473                         } else {
1474                                 /* Only eliminate the 'waiting' thread once */
1475                                 return;
1476                         }
1477                 }
1478         }
1479 }
1480
1481 /**
1482  * @brief Check whether a model action is enabled.
1483  *
1484  * Checks whether a lock or join operation would be successful (i.e., is the
1485  * lock already locked, or is the joined thread already complete). If not, put
1486  * the action in a waiter list.
1487  *
1488  * @param curr The ModelAction whose enabledness is to be checked.
1489  * @return a bool that indicates whether the action is enabled.
1490  */
1491 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1492         if (curr->is_lock()) {
1493                 std::mutex *lock = curr->get_mutex();
1494                 struct std::mutex_state *state = lock->get_state();
1495                 if (state->locked)
1496                         return false;
1497         } else if (curr->is_thread_join()) {
1498                 Thread *blocking = curr->get_thread_operand();
1499                 if (!blocking->is_complete()) {
1500                         thread_blocking_check_promises(blocking, get_thread(curr));
1501                         return false;
1502                 }
1503         }
1504
1505         return true;
1506 }
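
/*
 * Illustrative sketch (hypothetical user code): the two blocking cases this
 * check covers.  While another thread holds 'm', the lock() action is left
 * disabled and placed on a waiter list; join() stays disabled until the
 * child thread is complete.
 *
 *   #include <mutex>
 *   #include <thread>
 *
 *   std::mutex m;
 *
 *   void child() { m.lock(); m.unlock(); }
 *
 *   int main() {
 *           std::thread t(child);
 *           m.lock();      // not enabled while 'child' holds the mutex
 *           m.unlock();
 *           t.join();      // not enabled until 'child' has completed
 *   }
 */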
1507
1508 /**
1509  * This is the heart of the model checker routine. It performs model-checking
1510  * actions corresponding to a given "current action." Among other tasks, it
1511  * calculates reads-from relationships, updates synchronization clock vectors,
1512  * forms a memory_order constraints graph, and handles replay/backtrack
1513  * execution when running permutations of previously-observed executions.
1514  *
1515  * @param curr The current action to process
1516  * @return The ModelAction that is actually executed; may be different than
1517  * curr; may be NULL, if the current action is not enabled to run
1518  */
1519 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1520 {
1521         ASSERT(curr);
1522         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1523         bool newly_explored = initialize_curr_action(&curr);
1524
1525         DBG();
1526
1527         wake_up_sleeping_actions(curr);
1528
1529         /* Compute fairness information for CHESS yield algorithm */
1530         if (model->params.yieldon) {
1531                 curr->get_node()->update_yield(scheduler);
1532         }
1533
1534         /* Add the action to lists before any other model-checking tasks */
1535         if (!second_part_of_rmw)
1536                 add_action_to_lists(curr);
1537
1538         /* Build may_read_from set for newly-created actions */
1539         if (newly_explored && curr->is_read())
1540                 build_may_read_from(curr);
1541
1542         /* Initialize work_queue with the "current action" work */
1543         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1544         while (!work_queue.empty() && !has_asserted()) {
1545                 WorkQueueEntry work = work_queue.front();
1546                 work_queue.pop_front();
1547
1548                 switch (work.type) {
1549                 case WORK_CHECK_CURR_ACTION: {
1550                         ModelAction *act = work.action;
1551                         bool update = false; /* update this location's release sequences */
1552                         bool update_all = false; /* update all release sequences */
1553
1554                         if (process_thread_action(curr))
1555                                 update_all = true;
1556
1557                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1558                                 update = true;
1559
1560                         if (act->is_write() && process_write(act))
1561                                 update = true;
1562
1563                         if (act->is_fence() && process_fence(act))
1564                                 update_all = true;
1565
1566                         if (act->is_mutex_op() && process_mutex(act))
1567                                 update_all = true;
1568
1569                         if (act->is_relseq_fixup())
1570                                 process_relseq_fixup(curr, &work_queue);
1571
1572                         if (update_all)
1573                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1574                         else if (update)
1575                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1576                         break;
1577                 }
1578                 case WORK_CHECK_RELEASE_SEQ:
1579                         resolve_release_sequences(work.location, &work_queue);
1580                         break;
1581                 case WORK_CHECK_MO_EDGES: {
1582                         /** @todo Complete verification of work_queue */
1583                         ModelAction *act = work.action;
1584                         bool updated = false;
1585
1586                         if (act->is_read()) {
1587                                 const ModelAction *rf = act->get_reads_from();
1588                                 const Promise *promise = act->get_reads_from_promise();
1589                                 if (rf) {
1590                                         if (r_modification_order(act, rf))
1591                                                 updated = true;
1592                                 } else if (promise) {
1593                                         if (r_modification_order(act, promise))
1594                                                 updated = true;
1595                                 }
1596                         }
1597                         if (act->is_write()) {
1598                                 if (w_modification_order(act, NULL))
1599                                         updated = true;
1600                         }
1601                         mo_graph->commitChanges();
1602
1603                         if (updated)
1604                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1605                         break;
1606                 }
1607                 default:
1608                         ASSERT(false);
1609                         break;
1610                 }
1611         }
1612
1613         check_curr_backtracking(curr);
1614         set_backtracking(curr);
1615         return curr;
1616 }
1617
1618 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1619 {
1620         Node *currnode = curr->get_node();
1621         Node *parnode = currnode->get_parent();
1622
1623         if ((parnode && !parnode->backtrack_empty()) ||
1624                          !currnode->misc_empty() ||
1625                          !currnode->read_from_empty() ||
1626                          !currnode->promise_empty() ||
1627                          !currnode->relseq_break_empty()) {
1628                 set_latest_backtrack(curr);
1629         }
1630 }
1631
1632 bool ModelChecker::promises_expired() const
1633 {
1634         for (unsigned int i = 0; i < promises->size(); i++) {
1635                 Promise *promise = (*promises)[i];
1636                 if (promise->get_expiration() < priv->used_sequence_numbers)
1637                         return true;
1638         }
1639         return false;
1640 }
1641
1642 /**
1643  * This is the strongest feasibility check available.
1644  * @return whether the current trace (partial or complete) must be a prefix of
1645  * a feasible trace.
1646  */
1647 bool ModelChecker::isfeasibleprefix() const
1648 {
1649         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1650 }
1651
1652 /**
1653  * Print diagnostic information about an infeasible execution
1654  * @param prefix A string to prefix the output with; if NULL, then a default
1655  * message prefix will be provided
1656  */
1657 void ModelChecker::print_infeasibility(const char *prefix) const
1658 {
1659         char buf[100];
1660         char *ptr = buf;
1661         if (mo_graph->checkForCycles())
1662                 ptr += sprintf(ptr, "[mo cycle]");
1663         if (priv->failed_promise)
1664                 ptr += sprintf(ptr, "[failed promise]");
1665         if (priv->too_many_reads)
1666                 ptr += sprintf(ptr, "[too many reads]");
1667         if (priv->no_valid_reads)
1668                 ptr += sprintf(ptr, "[no valid reads-from]");
1669         if (priv->bad_synchronization)
1670                 ptr += sprintf(ptr, "[bad sw ordering]");
1671         if (promises_expired())
1672                 ptr += sprintf(ptr, "[promise expired]");
1673         if (promises->size() != 0)
1674                 ptr += sprintf(ptr, "[unresolved promise]");
1675         if (ptr != buf)
1676                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1677 }
1678
1679 /**
1680  * Returns whether the current completed trace is feasible, except for pending
1681  * release sequences.
1682  */
1683 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1684 {
1685         return !is_infeasible() && promises->size() == 0;
1686 }
1687
1688 /**
1689  * Check if the current partial trace is infeasible. Does not check any
1690  * end-of-execution flags, which might rule out the execution. Thus, this is
1691  * useful only for ruling an execution as infeasible.
1692  * @return whether the current partial trace is infeasible.
1693  */
1694 bool ModelChecker::is_infeasible() const
1695 {
1696         return mo_graph->checkForCycles() ||
1697                 priv->no_valid_reads ||
1698                 priv->failed_promise ||
1699                 priv->too_many_reads ||
1700                 priv->bad_synchronization ||
1701                 promises_expired();
1702 }
1703
1704 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1705 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1706         ModelAction *lastread = get_last_action(act->get_tid());
1707         lastread->process_rmw(act);
1708         if (act->is_rmw()) {
1709                 if (lastread->get_reads_from())
1710                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1711                 else
1712                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1713                 mo_graph->commitChanges();
1714         }
1715         return lastread;
1716 }
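
/*
 * Illustrative sketch (hypothetical): the RMW atomicity constraint added by
 * addRMWEdge().  If the fetch_add below reads-from the store of 1, its write
 * must immediately follow that store in modification order, so no other
 * write to 'x' can be mod-ordered between them.
 *
 *   std::atomic<int> x(0);
 *   x.store(1, std::memory_order_relaxed);
 *   x.fetch_add(1, std::memory_order_relaxed);   // reads 1, writes 2
 */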
1717
1718 /**
1719  * A helper function for ModelChecker::check_recency, to check if the current
1720  * thread could have read from a different write/promise for each of the last
1721  * 'params.maxreads' steps and if that write/promise should become visible (i.e., is
1722  * ordered later in the modification order). This helps model memory liveness.
1723  *
1724  * @param curr The current action. Must be a read.
1725  * @param rf The write/promise from which we plan to read
1726  * @param other_rf The write/promise from which we may read
1727  * @return True if we were able to read from other_rf for params.maxreads steps
1728  */
1729 template <typename T, typename U>
1730 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1731 {
1732         /* Need a different write/promise */
1733         if (other_rf->equals(rf))
1734                 return false;
1735
1736         /* Only look for "newer" writes/promises */
1737         if (!mo_graph->checkReachable(rf, other_rf))
1738                 return false;
1739
1740         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1741         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1742         action_list_t::reverse_iterator rit = list->rbegin();
1743         ASSERT((*rit) == curr);
1744         /* Skip past curr */
1745         rit++;
1746
1747         /* Does this write/promise work for everyone? */
1748         for (int i = 0; i < params.maxreads; i++, rit++) {
1749                 ModelAction *act = *rit;
1750                 if (!act->may_read_from(other_rf))
1751                         return false;
1752         }
1753         return true;
1754 }
1755
1756 /**
1757  * Checks whether a thread has read from the same write or Promise for too many
1758  * times without seeing the effects of a later write/Promise.
1759  *
1760  * Basic idea:
1761  * 1) there must be a different write/promise that we could read from,
1762  * 2) we must have read from the same write/promise in excess of maxreads times,
1763  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1764  * 4) that other write/promise must be mod-ordered after the write/promise we are reading from.
1765  *
1766  * If so, we decide that the execution is no longer feasible.
1767  *
1768  * @param curr The current action. Must be a read.
1769  * @param rf The ModelAction/Promise from which we might read.
1770  * @return True if the read should succeed; false otherwise
1771  */
1772 template <typename T>
1773 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1774 {
1775         if (!params.maxreads)
1776                 return true;
1777
1778         //NOTE: The next check is just an optimization; it is not strictly necessary
1779         if (curr->get_node()->get_read_from_past_size() +
1780                         curr->get_node()->get_read_from_promise_size() <= 1)
1781                 return true;
1782
1783         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1784         int tid = id_to_int(curr->get_tid());
1785         ASSERT(tid < (int)thrd_lists->size());
1786         action_list_t *list = &(*thrd_lists)[tid];
1787         action_list_t::reverse_iterator rit = list->rbegin();
1788         ASSERT((*rit) == curr);
1789         /* Skip past curr */
1790         rit++;
1791
1792         action_list_t::reverse_iterator ritcopy = rit;
1793         /* See if we have enough reads from the same value */
1794         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1795                 if (ritcopy == list->rend())
1796                         return true;
1797                 ModelAction *act = *ritcopy;
1798                 if (!act->is_read())
1799                         return true;
1800                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1801                         return true;
1802                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1803                         return true;
1804                 if (act->get_node()->get_read_from_past_size() +
1805                                 act->get_node()->get_read_from_promise_size() <= 1)
1806                         return true;
1807         }
1808         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1809                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1810                 if (should_read_instead(curr, rf, write))
1811                         return false; /* liveness failure */
1812         }
1813         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1814                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1815                 if (should_read_instead(curr, rf, promise))
1816                         return false; /* liveness failure */
1817         }
1818         return true;
1819 }
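
/*
 * Illustrative sketch (hypothetical user test): the liveness property this
 * bound models.  With params.maxreads set, the spinning reader may keep
 * returning the stale 0 only a bounded number of times once the store of 1
 * is available and mod-ordered later; past that bound the trace is declared
 * infeasible, so the loop must eventually observe the new value.
 *
 *   std::atomic<int> done(0);
 *
 *   void t1() { done.store(1, std::memory_order_relaxed); }
 *   void t2() { while (!done.load(std::memory_order_relaxed)) ; }
 */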
1820
1821 /**
1822  * @brief Updates the mo_graph with the constraints imposed from the current
1823  * read.
1824  *
1825  * Basic idea is the following: Go through each other thread and find
1826  * the last action that happened before our read.  Two cases:
1827  *
1828  * -# The action is a write: that write must either occur before
1829  * the write we read from or be the write we read from.
1830  * -# The action is a read: the write that that action read from
1831  * must occur before the write we read from or be the same write.
1832  *
1833  * @param curr The current action. Must be a read.
1834  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1835  * @return True if modification order edges were added; false otherwise
1836  */
1837 template <typename rf_type>
1838 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1839 {
1840         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1841         unsigned int i;
1842         bool added = false;
1843         ASSERT(curr->is_read());
1844
1845         /* Last SC fence in the current thread */
1846         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1847         ModelAction *last_sc_write = NULL;
1848         if (curr->is_seqcst())
1849                 last_sc_write = get_last_seq_cst_write(curr);
1850
1851         /* Iterate over all threads */
1852         for (i = 0; i < thrd_lists->size(); i++) {
1853                 /* Last SC fence in thread i */
1854                 ModelAction *last_sc_fence_thread_local = NULL;
1855                 if (int_to_id((int)i) != curr->get_tid())
1856                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1857
1858                 /* Last SC fence in thread i, before last SC fence in current thread */
1859                 ModelAction *last_sc_fence_thread_before = NULL;
1860                 if (last_sc_fence_local)
1861                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1862
1863                 /* Iterate over actions in thread, starting from most recent */
1864                 action_list_t *list = &(*thrd_lists)[i];
1865                 action_list_t::reverse_iterator rit;
1866                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1867                         ModelAction *act = *rit;
1868
1869                         /* Skip curr */
1870                         if (act == curr)
1871                                 continue;
1872                         /* Don't want to add reflexive edges on 'rf' */
1873                         if (act->equals(rf)) {
1874                                 if (act->happens_before(curr))
1875                                         break;
1876                                 else
1877                                         continue;
1878                         }
1879
1880                         if (act->is_write()) {
1881                                 /* C++, Section 29.3 statement 5 */
1882                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1883                                                 *act < *last_sc_fence_thread_local) {
1884                                         added = mo_graph->addEdge(act, rf) || added;
1885                                         break;
1886                                 }
1887                                 /* C++, Section 29.3 statement 4 */
1888                                 else if (act->is_seqcst() && last_sc_fence_local &&
1889                                                 *act < *last_sc_fence_local) {
1890                                         added = mo_graph->addEdge(act, rf) || added;
1891                                         break;
1892                                 }
1893                                 /* C++, Section 29.3 statement 6 */
1894                                 else if (last_sc_fence_thread_before &&
1895                                                 *act < *last_sc_fence_thread_before) {
1896                                         added = mo_graph->addEdge(act, rf) || added;
1897                                         break;
1898                                 }
1899                         }
1900
1901                         /* C++, Section 29.3 statement 3 (second subpoint) */
1902                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1903                                 added = mo_graph->addEdge(act, rf) || added;
1904                                 break;
1905                         }
1906
1907                         /*
1908                          * Include at most one act per-thread that "happens
1909                          * before" curr
1910                          */
1911                         if (act->happens_before(curr)) {
1912                                 if (act->is_write()) {
1913                                         added = mo_graph->addEdge(act, rf) || added;
1914                                 } else {
1915                                         const ModelAction *prevrf = act->get_reads_from();
1916                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1917                                         if (prevrf) {
1918                                                 if (!prevrf->equals(rf))
1919                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1920                                         } else if (!prevrf_promise->equals(rf)) {
1921                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1922                                         }
1923                                 }
1924                                 break;
1925                         }
1926                 }
1927         }
1928
1929         /*
1930          * All compatible, thread-exclusive promises must be ordered after any
1931          * concrete loads from the same thread
1932          */
1933         for (unsigned int i = 0; i < promises->size(); i++)
1934                 if ((*promises)[i]->is_compatible_exclusive(curr))
1935                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1936
1937         return added;
1938 }
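
/*
 * Illustrative sketch (hypothetical user test): the kind of edge this routine
 * derives from a read.  The store of 1 happens before t3's load of 'x' (via
 * the release/acquire pair on 'flag'), so if that load reads-from the store
 * of 2, the store of 1 must be mod-ordered before it: 1 --mo--> 2.
 *
 *   #include <atomic>
 *
 *   std::atomic<int> x(0), flag(0);
 *
 *   void t1() { x.store(1, std::memory_order_relaxed);
 *               flag.store(1, std::memory_order_release); }
 *   void t2() { x.store(2, std::memory_order_relaxed); }
 *   void t3() { if (flag.load(std::memory_order_acquire) == 1)
 *                       (void)x.load(std::memory_order_relaxed); // may read 2
 *   }
 */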
1939
1940 /**
1941  * Updates the mo_graph with the constraints imposed from the current write.
1942  *
1943  * Basic idea is the following: Go through each other thread and find
1944  * the latest action that happened before our write.  Two cases:
1945  *
1946  * (1) The action is a write => that write must occur before
1947  * the current write
1948  *
1949  * (2) The action is a read => the write that that action read from
1950  * must occur before the current write.
1951  *
1952  * This method also handles two other issues:
1953  *
1954  * (I) Sequential Consistency: Making sure that if the current write is
1955  * seq_cst, that it occurs after the previous seq_cst write.
1956  *
1957  * (II) Sending the write back to non-synchronizing reads.
1958  *
1959  * @param curr The current action. Must be a write.
1960  * @param send_fv A vector for stashing reads to which we may pass our future
1961  * value. If NULL, then don't record any future values.
1962  * @return True if modification order edges were added; false otherwise
1963  */
1964 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1965 {
1966         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1967         unsigned int i;
1968         bool added = false;
1969         ASSERT(curr->is_write());
1970
1971         if (curr->is_seqcst()) {
1972                 /* We have to at least see the last sequentially consistent write,
1973                  * so we are initialized. */
1974                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1975                 if (last_seq_cst != NULL) {
1976                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1977                 }
1978         }
1979
1980         /* Last SC fence in the current thread */
1981         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1982
1983         /* Iterate over all threads */
1984         for (i = 0; i < thrd_lists->size(); i++) {
1985                 /* Last SC fence in thread i, before last SC fence in current thread */
1986                 ModelAction *last_sc_fence_thread_before = NULL;
1987                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1988                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1989
1990                 /* Iterate over actions in thread, starting from most recent */
1991                 action_list_t *list = &(*thrd_lists)[i];
1992                 action_list_t::reverse_iterator rit;
1993                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1994                         ModelAction *act = *rit;
1995                         if (act == curr) {
1996                                 /*
1997                                  * 1) If RMW and it actually read from something, then we
1998                                  * already have all relevant edges, so just skip to next
1999                                  * thread.
2000                                  *
2001                                  * 2) If RMW and it didn't read from anything, we should add
2002                                  * whatever edge we can get to speed up convergence.
2003                                  *
2004                                  * 3) If normal write, we need to look at earlier actions, so
2005                                  * continue processing list.
2006                                  */
2007                                 if (curr->is_rmw()) {
2008                                         if (curr->get_reads_from() != NULL)
2009                                                 break;
2010                                         else
2011                                                 continue;
2012                                 } else
2013                                         continue;
2014                         }
2015
2016                         /* C++, Section 29.3 statement 7 */
2017                         if (last_sc_fence_thread_before && act->is_write() &&
2018                                         *act < *last_sc_fence_thread_before) {
2019                                 added = mo_graph->addEdge(act, curr) || added;
2020                                 break;
2021                         }
2022
2023                         /*
2024                          * Include at most one act per-thread that "happens
2025                          * before" curr
2026                          */
2027                         if (act->happens_before(curr)) {
2028                                 /*
2029                                  * Note: if act is RMW, just add edge:
2030                                  *   act --mo--> curr
2031                                  * The following edge should be handled elsewhere:
2032                                  *   readfrom(act) --mo--> act
2033                                  */
2034                                 if (act->is_write())
2035                                         added = mo_graph->addEdge(act, curr) || added;
2036                                 else if (act->is_read()) {
2037                                         //if previous read accessed a null, just keep going
2038                                         if (act->get_reads_from() == NULL)
2039                                                 continue;
2040                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2041                                 }
2042                                 break;
2043                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2044                                                      !act->same_thread(curr)) {
2045                                 /* We have an action that:
2046                                    (1) did not happen before us
2047                                    (2) is a read and we are a write
2048                                    (3) cannot synchronize with us
2049                                    (4) is in a different thread
2050                                    =>
2051                                    that read could potentially read from our write.  Note that
2052                                    these checks are overly conservative at this point; we'll
2053                                    do more checks before actually removing the
2054                                    pendingfuturevalue.
2055
2056                                  */
2057                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2058                                         if (!is_infeasible())
2059                                                 send_fv->push_back(act);
2060                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2061                                                 add_future_value(curr, act);
2062                                 }
2063                         }
2064                 }
2065         }
2066
2067         /*
2068          * All compatible, thread-exclusive promises must be ordered after any
2069          * concrete stores to the same thread, or else they can be merged with
2070          * this store later
2071          */
2072         for (unsigned int i = 0; i < promises->size(); i++)
2073                 if ((*promises)[i]->is_compatible_exclusive(curr))
2074                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2075
2076         return added;
2077 }
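
/*
 * Illustrative sketch (hypothetical user test): the kind of edge this routine
 * derives from a write.  The store of 1 happens before the store of 2 (via
 * the release/acquire pair on 'flag'), so write-write coherence requires
 * 1 --mo--> 2, which is the edge added in the happens-before case above.
 *
 *   #include <atomic>
 *
 *   std::atomic<int> x(0), flag(0);
 *
 *   void t1() { x.store(1, std::memory_order_relaxed);
 *               flag.store(1, std::memory_order_release); }
 *   void t2() { if (flag.load(std::memory_order_acquire) == 1)
 *                       x.store(2, std::memory_order_relaxed);
 *   }
 */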
2078
2079 /** Arbitrary reads from the future are not allowed.  Section 29.3
2080  * part 9 places some constraints.  This method checks one result of that
2081  * constraint.  Others require compiler support. */
2082 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2083 {
2084         if (!writer->is_rmw())
2085                 return true;
2086
2087         if (!reader->is_rmw())
2088                 return true;
2089
2090         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2091                 if (search == reader)
2092                         return false;
2093                 if (search->get_tid() == reader->get_tid() &&
2094                                 search->happens_before(reader))
2095                         break;
2096         }
2097
2098         return true;
2099 }
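
/*
 * Illustrative sketch (hypothetical): the RMW cycle this check rules out.
 * Each fetch_add must read the value written immediately before it in
 * modification order, so the two RMWs cannot read from each other in a
 * cycle; following the writer's reads-from chain must never lead back to
 * the reader.
 *
 *   std::atomic<int> x(0);
 *   void t1() { x.fetch_add(1, std::memory_order_relaxed); }
 *   void t2() { x.fetch_add(1, std::memory_order_relaxed); }
 */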
2100
2101 /**
2102  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2103  * some constraints. This method checks the following constraint (others
2104  * require compiler support):
2105  *
2106  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2107  */
2108 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2109 {
2110         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2111         unsigned int i;
2112         /* Iterate over all threads */
2113         for (i = 0; i < thrd_lists->size(); i++) {
2114                 const ModelAction *write_after_read = NULL;
2115
2116                 /* Iterate over actions in thread, starting from most recent */
2117                 action_list_t *list = &(*thrd_lists)[i];
2118                 action_list_t::reverse_iterator rit;
2119                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2120                         ModelAction *act = *rit;
2121
2122                         /* Don't disallow due to act == reader */
2123                         if (!reader->happens_before(act) || reader == act)
2124                                 break;
2125                         else if (act->is_write())
2126                                 write_after_read = act;
2127                         else if (act->is_read() && act->get_reads_from() != NULL)
2128                                 write_after_read = act->get_reads_from();
2129                 }
2130
2131                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2132                         return false;
2133         }
2134         return true;
2135 }
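
/*
 * Illustrative sketch (hypothetical): the constraint stated above.  The load
 * X is sequenced before the store Y, so X --hb--> Y; if Y --mo--> Z, then by
 * read-write coherence X must not read from Z, so a future value from Z may
 * not be sent back to X.
 *
 *   std::atomic<int> x(0);
 *   void t1() { int r = x.load(std::memory_order_relaxed);    // X
 *               (void)r;
 *               x.store(1, std::memory_order_relaxed); }      // Y
 *   void t2() { x.store(2, std::memory_order_relaxed); }      // Z
 */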
2136
2137 /**
2138  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2139  * The ModelAction under consideration is expected to be taking part in
2140  * release/acquire synchronization as an object of the "reads from" relation.
2141  * Note that this can only provide release sequence support for RMW chains
2142  * which do not read from the future, as those actions cannot be traced until
2143  * their "promise" is fulfilled. Similarly, we may not even establish the
2144  * presence of a release sequence with certainty, as some modification order
2145  * constraints may be decided further in the future. Thus, this function
2146  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2147  * and a boolean representing certainty.
2148  *
2149  * @param rf The action that might be part of a release sequence. Must be a
2150  * write.
2151  * @param release_heads A pass-by-reference style return parameter. After
2152  * execution of this function, release_heads will contain the heads of all the
2153  * relevant release sequences, if any exist with certainty
2154  * @param pending A pass-by-reference style return parameter which is only used
2155  * when returning false (i.e., uncertain). Returns most information regarding
2156  * an uncertain release sequence, including any write operations that might
2157  * break the sequence.
2158  * @return true, if the ModelChecker is certain that release_heads is complete;
2159  * false otherwise
2160  */
2161 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2162                 rel_heads_list_t *release_heads,
2163                 struct release_seq *pending) const
2164 {
2165         /* Only check for release sequences if there are no cycles */
2166         if (mo_graph->checkForCycles())
2167                 return false;
2168
2169         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2170                 ASSERT(rf->is_write());
2171
2172                 if (rf->is_release())
2173                         release_heads->push_back(rf);
2174                 else if (rf->get_last_fence_release())
2175                         release_heads->push_back(rf->get_last_fence_release());
2176                 if (!rf->is_rmw())
2177                         break; /* End of RMW chain */
2178
2179                 /** @todo Need to be smarter here...  In the linux lock
2180                  * example, this will run to the beginning of the program for
2181                  * every acquire. */
2182                 /** @todo The way to be smarter here is to keep going until one
2183                  * thread has a release preceded by an acquire and you've seen
2184                  * both. */
2185
2186                 /* acq_rel RMW is a sufficient stopping condition */
2187                 if (rf->is_acquire() && rf->is_release())
2188                         return true; /* complete */
2189         }
2190         if (!rf) {
2191                 /* read from future: need to settle this later */
2192                 pending->rf = NULL;
2193                 return false; /* incomplete */
2194         }
2195
2196         if (rf->is_release())
2197                 return true; /* complete */
2198
2199         /* else relaxed write
2200          * - check for fence-release in the same thread (29.8, stmt. 3)
2201          * - check modification order for contiguous subsequence
2202          *   -> rf must be same thread as release */
2203
2204         const ModelAction *fence_release = rf->get_last_fence_release();
2205         /* Synchronize with a fence-release unconditionally; we don't need to
2206          * find any more "contiguous subsequence..." for it */
2207         if (fence_release)
2208                 release_heads->push_back(fence_release);
2209
2210         int tid = id_to_int(rf->get_tid());
2211         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2212         action_list_t *list = &(*thrd_lists)[tid];
2213         action_list_t::const_reverse_iterator rit;
2214
2215         /* Find rf in the thread list */
2216         rit = std::find(list->rbegin(), list->rend(), rf);
2217         ASSERT(rit != list->rend());
2218
2219         /* Find the last {write,fence}-release */
2220         for (; rit != list->rend(); rit++) {
2221                 if (fence_release && *(*rit) < *fence_release)
2222                         break;
2223                 if ((*rit)->is_release())
2224                         break;
2225         }
2226         if (rit == list->rend()) {
2227                 /* No write-release in this thread */
2228                 return true; /* complete */
2229         } else if (fence_release && *(*rit) < *fence_release) {
2230                 /* The fence-release is more recent (and so, "stronger") than
2231                  * the most recent write-release */
2232                 return true; /* complete */
2233         } /* else, need to establish contiguous release sequence */
2234         ModelAction *release = *rit;
2235
2236         ASSERT(rf->same_thread(release));
2237
2238         pending->writes.clear();
2239
2240         bool certain = true;
2241         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2242                 if (id_to_int(rf->get_tid()) == (int)i)
2243                         continue;
2244                 list = &(*thrd_lists)[i];
2245
2246                 /* Can we ensure no future writes from this thread may break
2247                  * the release seq? */
2248                 bool future_ordered = false;
2249
2250                 ModelAction *last = get_last_action(int_to_id(i));
2251                 Thread *th = get_thread(int_to_id(i));
2252                 if ((last && rf->happens_before(last)) ||
2253                                 !is_enabled(th) ||
2254                                 th->is_complete())
2255                         future_ordered = true;
2256
2257                 ASSERT(!th->is_model_thread() || future_ordered);
2258
2259                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2260                         const ModelAction *act = *rit;
2261                         /* Reach synchronization -> this thread is complete */
2262                         if (act->happens_before(release))
2263                                 break;
2264                         if (rf->happens_before(act)) {
2265                                 future_ordered = true;
2266                                 continue;
2267                         }
2268
2269                         /* Only non-RMW writes can break release sequences */
2270                         if (!act->is_write() || act->is_rmw())
2271                                 continue;
2272
2273                         /* Check modification order */
2274                         if (mo_graph->checkReachable(rf, act)) {
2275                                 /* rf --mo--> act */
2276                                 future_ordered = true;
2277                                 continue;
2278                         }
2279                         if (mo_graph->checkReachable(act, release))
2280                                 /* act --mo--> release */
2281                                 break;
2282                         if (mo_graph->checkReachable(release, act) &&
2283                                       mo_graph->checkReachable(act, rf)) {
2284                                 /* release --mo-> act --mo--> rf */
2285                                 return true; /* complete */
2286                         }
2287                         /* act may break release sequence */
2288                         pending->writes.push_back(act);
2289                         certain = false;
2290                 }
2291                 if (!future_ordered)
2292                         certain = false; /* This thread is uncertain */
2293         }
2294
2295         if (certain) {
2296                 release_heads->push_back(release);
2297                 pending->writes.clear();
2298         } else {
2299                 pending->release = release;
2300                 pending->rf = rf;
2301         }
2302         return certain;
2303 }
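
/*
 * Illustrative sketch (hypothetical user test): a release sequence with an
 * RMW in its tail.  The fetch_add extends the sequence headed by t1's
 * release store, so when t3's acquire load reads-from the fetch_add it still
 * synchronizes with t1 and the plain write to 'data' is visible.  A relaxed
 * store to 'x' from yet another thread, mod-ordered between the head and the
 * fetch_add, would break the sequence; such stores are the "pending" writes
 * tracked above.
 *
 *   #include <atomic>
 *   #include <cassert>
 *
 *   std::atomic<int> x(0);
 *   int data = 0;
 *
 *   void t1() { data = 1;
 *               x.store(1, std::memory_order_release); }       // head
 *   void t2() { x.fetch_add(1, std::memory_order_relaxed); }   // extends seq
 *   void t3() { if (x.load(std::memory_order_acquire) == 2)
 *                       assert(data == 1);
 *   }
 */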
2304
2305 /**
2306  * An interface for getting the release sequence head(s) with which a
2307  * given ModelAction must synchronize. This function only returns a non-empty
2308  * result when it can locate a release sequence head with certainty. Otherwise,
2309  * it may mark the internal state of the ModelChecker so that it will handle
2310  * the release sequence at a later time, causing @a acquire to update its
2311  * synchronization at some later point in execution.
2312  *
2313  * @param acquire The 'acquire' action that may synchronize with a release
2314  * sequence
2315  * @param read The read action that may read from a release sequence; this may
2316  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2317  * when 'acquire' is a fence-acquire)
2318  * @param release_heads A pass-by-reference return parameter. Will be filled
2319  * with the head(s) of the release sequence(s), if they exist with certainty.
2320  * @see ModelChecker::release_seq_heads
2321  */
2322 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2323                 ModelAction *read, rel_heads_list_t *release_heads)
2324 {
2325         const ModelAction *rf = read->get_reads_from();
2326         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2327         sequence->acquire = acquire;
2328         sequence->read = read;
2329
2330         if (!release_seq_heads(rf, release_heads, sequence)) {
2331                 /* add act to 'lazy checking' list */
2332                 pending_rel_seqs->push_back(sequence);
2333         } else {
2334                 snapshot_free(sequence);
2335         }
2336 }
2337
2338 /**
2339  * Attempt to resolve all stashed operations that might synchronize with a
2340  * release sequence for a given location. This implements the "lazy" portion of
2341  * determining whether or not a release sequence was contiguous, since not all
2342  * modification order information is present at the time an action occurs.
2343  *
2344  * @param location The location/object that should be checked for release
2345  * sequence resolutions. A NULL value means to check all locations.
2346  * @param work_queue The work queue to which to add work items as they are
2347  * generated
2348  * @return True if any updates occurred (new synchronization, new mo_graph
2349  * edges)
2350  */
2351 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2352 {
2353         bool updated = false;
2354         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2355         while (it != pending_rel_seqs->end()) {
2356                 struct release_seq *pending = *it;
2357                 ModelAction *acquire = pending->acquire;
2358                 const ModelAction *read = pending->read;
2359
2360                 /* Only resolve sequences on the given location, if provided */
2361                 if (location && read->get_location() != location) {
2362                         it++;
2363                         continue;
2364                 }
2365
2366                 const ModelAction *rf = read->get_reads_from();
2367                 rel_heads_list_t release_heads;
2368                 bool complete;
2369                 complete = release_seq_heads(rf, &release_heads, pending);
2370                 for (unsigned int i = 0; i < release_heads.size(); i++)
2371                         if (!acquire->has_synchronized_with(release_heads[i]))
2372                                 if (synchronize(release_heads[i], acquire))
2373                                         updated = true;
2374
2375                 if (updated) {
2376                         /* Re-check all pending release sequences */
2377                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2378                         /* Re-check read-acquire for mo_graph edges */
2379                         if (acquire->is_read())
2380                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2381
2382                         /* propagate synchronization to later actions */
2383                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2384                         for (; (*rit) != acquire; rit++) {
2385                                 ModelAction *propagate = *rit;
2386                                 if (acquire->happens_before(propagate)) {
2387                                         synchronize(acquire, propagate);
2388                                         /* Re-check 'propagate' for mo_graph edges */
2389                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2390                                 }
2391                         }
2392                 }
2393                 if (complete) {
2394                         it = pending_rel_seqs->erase(it);
2395                         snapshot_free(pending);
2396                 } else {
2397                         it++;
2398                 }
2399         }
2400
2401         // If we resolved promises or added synchronization, see if we have realized a data race.
2402         checkDataRaces();
2403
2404         return updated;
2405 }
2406
2407 /**
2408  * Performs various bookkeeping operations for the current ModelAction. For
2409  * instance, adds action to the per-object, per-thread action vector and to the
2410  * action trace list of all thread actions.
2411  *
2412  * @param act is the ModelAction to add.
2413  */
2414 void ModelChecker::add_action_to_lists(ModelAction *act)
2415 {
2416         int tid = id_to_int(act->get_tid());
2417         ModelAction *uninit = NULL;
2418         int uninit_id = -1;
2419         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2420         if (list->empty() && act->is_atomic_var()) {
2421                 uninit = get_uninitialized_action(act);
2422                 uninit_id = id_to_int(uninit->get_tid());
2423                 list->push_front(uninit);
2424         }
2425         list->push_back(act);
2426
2427         action_trace->push_back(act);
2428         if (uninit)
2429                 action_trace->push_front(uninit);
2430
2431         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2432         if (tid >= (int)vec->size())
2433                 vec->resize(priv->next_thread_id);
2434         (*vec)[tid].push_back(act);
2435         if (uninit)
2436                 (*vec)[uninit_id].push_front(uninit);
2437
2438         if ((int)thrd_last_action->size() <= tid)
2439                 thrd_last_action->resize(get_num_threads());
2440         (*thrd_last_action)[tid] = act;
2441         if (uninit)
2442                 (*thrd_last_action)[uninit_id] = uninit;
2443
2444         if (act->is_fence() && act->is_release()) {
2445                 if ((int)thrd_last_fence_release->size() <= tid)
2446                         thrd_last_fence_release->resize(get_num_threads());
2447                 (*thrd_last_fence_release)[tid] = act;
2448         }
2449
2450         if (act->is_wait()) {
2451                 void *mutex_loc = (void *) act->get_value();
2452                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2453
2454                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2455                 if (tid >= (int)vec->size())
2456                         vec->resize(priv->next_thread_id);
2457                 (*vec)[tid].push_back(act);
2458         }
2459 }
2460
2461 /**
2462  * @brief Get the last action performed by a particular Thread
2463  * @param tid The thread ID of the Thread in question
2464  * @return The last action in the thread
2465  */
2466 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2467 {
2468         int threadid = id_to_int(tid);
2469         if (threadid < (int)thrd_last_action->size())
2470                 return (*thrd_last_action)[id_to_int(tid)];
2471         else
2472                 return NULL;
2473 }
2474
2475 /**
2476  * @brief Get the last fence release performed by a particular Thread
2477  * @param tid The thread ID of the Thread in question
2478  * @return The last fence release in the thread, if one exists; NULL otherwise
2479  */
2480 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2481 {
2482         int threadid = id_to_int(tid);
2483         if (threadid < (int)thrd_last_fence_release->size())
2484                 return (*thrd_last_fence_release)[id_to_int(tid)];
2485         else
2486                 return NULL;
2487 }
2488
2489 /**
2490  * Gets the last memory_order_seq_cst write (in the total global sequence)
2491  * performed on a particular object (i.e., memory location), not including the
2492  * current action.
2493  * @param curr The current ModelAction; also denotes the object location to
2494  * check
2495  * @return The last seq_cst write
2496  */
2497 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2498 {
2499         void *location = curr->get_location();
2500         action_list_t *list = get_safe_ptr_action(obj_map, location);
2501         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2502         action_list_t::reverse_iterator rit;
2503         for (rit = list->rbegin(); (*rit) != curr; rit++)
2504                 ;
2505         rit++; /* Skip past curr */
2506         for ( ; rit != list->rend(); rit++)
2507                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2508                         return *rit;
2509         return NULL;
2510 }
2511
2512 /**
2513  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2514  * performed in a particular thread, prior to a particular fence.
2515  * @param tid The ID of the thread to check
2516  * @param before_fence The fence from which to begin the search; if NULL, then
2517  * search for the most recent fence in the thread.
2518  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2519  */
2520 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2521 {
2522         /* All fences should have NULL location */
2523         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2524         action_list_t::reverse_iterator rit = list->rbegin();
2525
2526         if (before_fence) {
2527                 for (; rit != list->rend(); rit++)
2528                         if (*rit == before_fence)
2529                                 break;
2530
2531                 ASSERT(*rit == before_fence);
2532                 rit++;
2533         }
2534
2535         for (; rit != list->rend(); rit++)
2536                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2537                         return *rit;
2538         return NULL;
2539 }
2540
2541 /**
2542  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2543  * location). This function identifies the mutex according to the current
2544  * action, which is presumed to operate on the same mutex.
2545  * @param curr The current ModelAction; also denotes the object location to
2546  * check
2547  * @return The last unlock operation
2548  */
2549 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2550 {
2551         void *location = curr->get_location();
2552         action_list_t *list = get_safe_ptr_action(obj_map, location);
2553         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2554         action_list_t::reverse_iterator rit;
2555         for (rit = list->rbegin(); rit != list->rend(); rit++)
2556                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2557                         return *rit;
2558         return NULL;
2559 }
2560
2561 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2562 {
2563         ModelAction *parent = get_last_action(tid);
2564         if (!parent)
2565                 parent = get_thread(tid)->get_creation();
2566         return parent;
2567 }
2568
2569 /**
2570  * Returns the clock vector for a given thread.
2571  * @param tid The thread whose clock vector we want
2572  * @return Desired clock vector
2573  */
2574 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2575 {
2576         return get_parent_action(tid)->get_cv();
2577 }
2578
2579 /**
2580  * @brief Find the promise (if any) to resolve for the current action and
2581  * remove it from the pending promise vector
2582  * @param curr The current ModelAction. Should be a write.
2583  * @return The Promise to resolve, if any; otherwise NULL
2584  */
2585 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2586 {
2587         for (unsigned int i = 0; i < promises->size(); i++)
2588                 if (curr->get_node()->get_promise(i)) {
2589                         Promise *ret = (*promises)[i];
2590                         promises->erase(promises->begin() + i);
2591                         return ret;
2592                 }
2593         return NULL;
2594 }
2595
2596 /**
2597  * Resolve a Promise with a current write.
2598  * @param write The ModelAction that is fulfilling Promises
2599  * @param promise The Promise to resolve
2600  * @return True if the Promise was successfully resolved; false otherwise
2601  */
2602 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2603 {
2604         ModelVector<ModelAction *> actions_to_check;
2605
2606         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2607                 ModelAction *read = promise->get_reader(i);
2608                 read_from(read, write);
2609                 actions_to_check.push_back(read);
2610         }
2611         /* Make sure the promise's value matches the write's value */
2612         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2613         if (!mo_graph->resolvePromise(promise, write))
2614                 priv->failed_promise = true;
2615
2616         /**
2617          * @todo  It is possible to end up in an inconsistent state, where a
2618          * "resolved" promise may still be referenced if
2619          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2620          *
2621          * Note that the inconsistency only matters when dumping mo_graph to
2622          * file.
2623          *
2624          * delete promise;
2625          */
2626
2627         // Check whether reading these writes has made threads unable to
2628         // resolve promises
2629         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2630                 ModelAction *read = actions_to_check[i];
2631                 mo_check_promises(read, true);
2632         }
2633
2634         return true;
2635 }
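/*
 * Illustrative sketch (not part of the checker): a typical promise lifecycle.
 * Suppose a load is explored before any write supplies the value it returns:
 *
 *   T2: r = x.load(memory_order_relaxed);    // speculatively returns 42
 *   T1: x.store(42, memory_order_relaxed);   // explored later
 *
 * The load is recorded against a Promise for value 42. When the store is
 * executed, pop_promise_to_resolve() selects that Promise, and
 * resolve_promise() binds each pending reader to the store via read_from(),
 * asserts value/location compatibility, and asks the CycleGraph to replace
 * the promised placeholder with the real write.
 */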
2636
2637 /**
2638  * Compute the set of promises that could potentially be satisfied by this
2639  * action. Note that the set computation actually appears in the Node, not in
2640  * ModelChecker.
2641  * @param curr The ModelAction that may satisfy promises
2642  */
2643 void ModelChecker::compute_promises(ModelAction *curr)
2644 {
2645         for (unsigned int i = 0; i < promises->size(); i++) {
2646                 Promise *promise = (*promises)[i];
2647                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2648                         continue;
2649
2650                 bool satisfy = true;
2651                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2652                         const ModelAction *act = promise->get_reader(j);
2653                         if (act->happens_before(curr) ||
2654                                         act->could_synchronize_with(curr)) {
2655                                 satisfy = false;
2656                                 break;
2657                         }
2658                 }
2659                 if (satisfy)
2660                         curr->get_node()->set_promise(i);
2661         }
2662 }
2663
2664 /** Checks promises in response to a change in a thread's ClockVector. */
2665 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2666 {
2667         for (unsigned int i = 0; i < promises->size(); i++) {
2668                 Promise *promise = (*promises)[i];
2669                 if (!promise->thread_is_available(tid))
2670                         continue;
2671                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2672                         const ModelAction *act = promise->get_reader(j);
2673                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2674                                         merge_cv->synchronized_since(act)) {
2675                                 if (promise->eliminate_thread(tid)) {
2676                                         /* Promise has failed */
2677                                         priv->failed_promise = true;
2678                                         return;
2679                                 }
2680                         }
2681                 }
2682         }
2683 }
2684
2685 void ModelChecker::check_promises_thread_disabled()
2686 {
2687         for (unsigned int i = 0; i < promises->size(); i++) {
2688                 Promise *promise = (*promises)[i];
2689                 if (promise->has_failed()) {
2690                         priv->failed_promise = true;
2691                         return;
2692                 }
2693         }
2694 }
2695
2696 /**
2697  * @brief Checks promises in response to addition to modification order for
2698  * threads.
2699  *
2700  * We test whether threads are still available for satisfying promises after an
2701  * addition to our modification order constraints. Those that are unavailable
2702  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2703  * that promise has failed.
2704  *
2705  * @param act The ModelAction which updated the modification order
2706  * @param is_read_check Should be true if act is a read and we must check for
2707  * updates to the store from which it read (there is a distinction here for
2708  * RMW's, which are both a load and a store)
2709  * RMWs, which are both a load and a store)
2710 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2711 {
2712         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2713
2714         for (unsigned int i = 0; i < promises->size(); i++) {
2715                 Promise *promise = (*promises)[i];
2716
2717                 // Is this promise on the same location?
2718                 if (!promise->same_location(write))
2719                         continue;
2720
2721                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2722                         const ModelAction *pread = promise->get_reader(j);
2723                         if (!pread->happens_before(act))
2724                                continue;
2725                         if (mo_graph->checkPromise(write, promise)) {
2726                                 priv->failed_promise = true;
2727                                 return;
2728                         }
2729                         break;
2730                 }
2731
2732                 // Don't do any lookups twice for the same thread
2733                 if (!promise->thread_is_available(act->get_tid()))
2734                         continue;
2735
2736                 if (mo_graph->checkReachable(promise, write)) {
2737                         if (mo_graph->checkPromise(write, promise)) {
2738                                 priv->failed_promise = true;
2739                                 return;
2740                         }
2741                 }
2742         }
2743 }
2744
2745 /**
2746  * Compute the set of writes that may break the current pending release
2747  * sequence. This information is extracted from previous release sequence
2748  * calculations.
2749  *
2750  * @param curr The current ModelAction. Must be a release sequence fixup
2751  * action.
2752  */
2753 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2754 {
2755         if (pending_rel_seqs->empty())
2756                 return;
2757
2758         struct release_seq *pending = pending_rel_seqs->back();
2759         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2760                 const ModelAction *write = pending->writes[i];
2761                 curr->get_node()->add_relseq_break(write);
2762         }
2763
2764         /* NULL means don't break the sequence; just synchronize */
2765         curr->get_node()->add_relseq_break(NULL);
2766 }
2767
2768 /**
2769  * Build up an initial set of all past writes that this 'read' action may read
2770  * from, as well as any previously-observed future values that must still be valid.
2771  *
2772  * @param curr is the current ModelAction that we are exploring; it must be a
2773  * 'read' operation.
2774  */
2775 void ModelChecker::build_may_read_from(ModelAction *curr)
2776 {
2777         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2778         unsigned int i;
2779         ASSERT(curr->is_read());
2780
2781         ModelAction *last_sc_write = NULL;
2782
2783         if (curr->is_seqcst())
2784                 last_sc_write = get_last_seq_cst_write(curr);
2785
2786         /* Iterate over all threads */
2787         for (i = 0; i < thrd_lists->size(); i++) {
2788                 /* Iterate over actions in thread, starting from most recent */
2789                 action_list_t *list = &(*thrd_lists)[i];
2790                 action_list_t::reverse_iterator rit;
2791                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2792                         ModelAction *act = *rit;
2793
2794                         /* Only consider 'write' actions */
2795                         if (!act->is_write() || act == curr)
2796                                 continue;
2797
2798                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2799                         bool allow_read = true;
2800
2801                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2802                                 allow_read = false;
2803                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2804                                 allow_read = false;
2805
2806                         if (allow_read) {
2807                                 /* Only add feasible reads */
2808                                 mo_graph->startChanges();
2809                                 r_modification_order(curr, act);
2810                                 if (!is_infeasible())
2811                                         curr->get_node()->add_read_from_past(act);
2812                                 mo_graph->rollbackChanges();
2813                         }
2814
2815                         /* Include at most one act per thread that "happens before" curr */
2816                         if (act->happens_before(curr))
2817                                 break;
2818                 }
2819         }
2820
2821         /* Inherit existing, promised future values */
2822         for (i = 0; i < promises->size(); i++) {
2823                 const Promise *promise = (*promises)[i];
2824                 const ModelAction *promise_read = promise->get_reader(0);
2825                 if (promise_read->same_var(curr)) {
2826                         /* Only add feasible future-values */
2827                         mo_graph->startChanges();
2828                         r_modification_order(curr, promise);
2829                         if (!is_infeasible())
2830                                 curr->get_node()->add_read_from_promise(promise_read);
2831                         mo_graph->rollbackChanges();
2832                 }
2833         }
2834
2835         /* We may find no valid may-read-from only if the execution is doomed */
2836         if (!curr->get_node()->read_from_size()) {
2837                 priv->no_valid_reads = true;
2838                 set_assert();
2839         }
2840
2841         if (DBG_ENABLED()) {
2842                 model_print("Reached read action:\n");
2843                 curr->print();
2844                 model_print("Printing read_from_past\n");
2845                 curr->get_node()->print_read_from_past();
2846                 model_print("End printing read_from_past\n");
2847         }
2848 }
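/*
 * Illustrative sketch (not part of the checker): the may-read-from set for a
 * small, unsynchronized program. With
 *
 *   T1: x.store(1, memory_order_relaxed);    // W1
 *   T1: x.store(2, memory_order_release);    // W2
 *   T2: r = x.load(memory_order_acquire);    // R (curr)
 *
 * the scan above walks T1's per-location list backwards, tentatively adds each
 * write whose r_modification_order() check leaves the execution feasible, and
 * stops within a thread once it has passed a write that happens before R.
 * Since nothing here synchronizes T1 with T2, both W1 and W2 end up in R's
 * candidate set, and backtracking later explores each choice of reads-from.
 */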
2849
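/**
 * @brief Check whether a read by a sleeping thread may read from a given write
 *
 * Walks backward from 'write' along its chain of RMW reads-from edges. The
 * read is permitted if the chain reaches an uninitialized action or a release
 * store that was performed while the reading thread was on the sleep set
 * (i.e., a write that could plausibly have woken it); it is rejected as soon
 * as a non-RMW write is reached without meeting either condition.
 *
 * @param curr The read being considered
 * @param write The candidate write to read from
 * @return True if the sleep-set restriction allows curr to read from write
 */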
2850 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2851 {
2852         for ( ; write != NULL; write = write->get_reads_from()) {
2853                 /* UNINIT actions don't have a Node, and they never sleep */
2854                 if (write->is_uninitialized())
2855                         return true;
2856                 Node *prevnode = write->get_node()->get_parent();
2857
2858                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2859                 if (write->is_release() && thread_sleep)
2860                         return true;
2861                 if (!write->is_rmw())
2862                         return false;
2863         }
2864         return true;
2865 }
2866
2867 /**
2868  * @brief Get an action representing an uninitialized atomic
2869  *
2870  * This function may create a new one or try to retrieve one from the NodeStack
2871  *
2872  * @param curr The current action, which prompts the creation of an UNINIT action
2873  * @return A pointer to the UNINIT ModelAction
2874  */
2875 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2876 {
2877         Node *node = curr->get_node();
2878         ModelAction *act = node->get_uninit_action();
2879         if (!act) {
2880                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2881                 node->set_uninit_action(act);
2882         }
2883         act->create_cv(NULL);
2884         return act;
2885 }
2886
2887 static void print_list(action_list_t *list)
2888 {
2889         action_list_t::iterator it;
2890
2891         model_print("---------------------------------------------------------------------\n");
2892
2893         unsigned int hash = 0;
2894
2895         for (it = list->begin(); it != list->end(); it++) {
2896                 const ModelAction *act = *it;
2897                 if (act->get_seq_number() > 0)
2898                         act->print();
2899                 hash = hash^(hash<<3)^((*it)->hash());
2900         }
2901         model_print("HASH %u\n", hash);
2902         model_print("---------------------------------------------------------------------\n");
2903 }
2904
2905 #if SUPPORT_MOD_ORDER_DUMP
2906 void ModelChecker::dumpGraph(char *filename) const
2907 {
2908         char buffer[200];
2909         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2910         FILE *file = fopen(buffer, "w");
2911         fprintf(file, "digraph %s {\n", filename);
2912         mo_graph->dumpNodes(file);
2913         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2914
2915         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2916                 ModelAction *act = *it;
2917                 if (act->is_read()) {
2918                         mo_graph->dot_print_node(file, act);
2919                         if (act->get_reads_from())
2920                                 mo_graph->dot_print_edge(file,
2921                                                 act->get_reads_from(),
2922                                                 act,
2923                                                 "label=\"rf\", color=red, weight=2");
2924                         else
2925                                 mo_graph->dot_print_edge(file,
2926                                                 act->get_reads_from_promise(),
2927                                                 act,
2928                                                 "label=\"rf\", color=red");
2929                 }
2930                 if (thread_array[id_to_int(act->get_tid())]) {
2931                         mo_graph->dot_print_edge(file,
2932                                         thread_array[id_to_int(act->get_tid())],
2933                                         act,
2934                                         "label=\"sb\", color=blue, weight=400");
2935                 }
2936
2937                 thread_array[id_to_int(act->get_tid())] = act;
2938         }
2939         fprintf(file, "}\n");
2940         model_free(thread_array);
2941         fclose(file);
2942 }
2943 #endif
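/*
 * Illustrative sketch (not part of the checker): the rough shape of the DOT
 * file emitted by dumpGraph("graph0001") above. Node names are whatever
 * CycleGraph chooses to print, so take this only as an outline:
 *
 *   digraph graph0001 {
 *     ...                                             // nodes from CycleGraph::dumpNodes()
 *     W1 -> R2 [label="rf", color=red, weight=2];     // reads-from edge
 *     A1 -> A2 [label="sb", color=blue, weight=400];  // sequenced-before edge
 *   }
 */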
2944
2945 /** @brief Prints an execution trace summary. */
2946 void ModelChecker::print_summary() const
2947 {
2948 #if SUPPORT_MOD_ORDER_DUMP
2949         char buffername[100];
2950         sprintf(buffername, "exec%04u", stats.num_total);
2951         mo_graph->dumpGraphToFile(buffername);
2952         sprintf(buffername, "graph%04u", stats.num_total);
2953         dumpGraph(buffername);
2954 #endif
2955
2956         model_print("Execution %d:", stats.num_total);
2957         if (isfeasibleprefix()) {
2958                 if (scheduler->all_threads_sleeping())
2959                         model_print(" SLEEP-SET REDUNDANT");
2960                 model_print("\n");
2961         } else
2962                 print_infeasibility(" INFEASIBLE");
2963         print_list(action_trace);
2964         model_print("\n");
2965         if (!promises->empty()) {
2966                 model_print("Pending promises:\n");
2967                 for (unsigned int i = 0; i < promises->size(); i++) {
2968                         model_print(" [P%u] ", i);
2969                         (*promises)[i]->print();
2970                 }
2971                 model_print("\n");
2972         }
2973 }
2974
2975 /**
2976  * Add a Thread to the system for the first time. Should only be called once
2977  * per thread.
2978  * @param t The Thread to add
2979  */
2980 void ModelChecker::add_thread(Thread *t)
2981 {
2982         thread_map->put(id_to_int(t->get_id()), t);
2983         scheduler->add_thread(t);
2984 }
2985
2986 /**
2987  * @brief Get a Thread reference by its ID
2988  * @param tid The Thread's ID
2989  * @return A Thread reference
2990  */
2991 Thread * ModelChecker::get_thread(thread_id_t tid) const
2992 {
2993         return thread_map->get(id_to_int(tid));
2994 }
2995
2996 /**
2997  * @brief Get a reference to the Thread in which a ModelAction was executed
2998  * @param act The ModelAction
2999  * @return A Thread reference
3000  */
3001 Thread * ModelChecker::get_thread(const ModelAction *act) const
3002 {
3003         return get_thread(act->get_tid());
3004 }
3005
3006 /**
3007  * @brief Get a Promise's "promise number"
3008  *
3009  * A "promise number" is an index number that is unique to a promise, valid
3010  * only for a specific snapshot of an execution trace. Promises may come and go
3011  * as they are generated and resolved, so an index only retains meaning for the
3012  * current snapshot.
3013  *
3014  * @param promise The Promise to check
3015  * @return The promise index, if the promise still is valid; otherwise -1
3016  */
3017 int ModelChecker::get_promise_number(const Promise *promise) const
3018 {
3019         for (unsigned int i = 0; i < promises->size(); i++)
3020                 if ((*promises)[i] == promise)
3021                         return i;
3022         /* Not found */
3023         return -1;
3024 }
3025
3026 /**
3027  * @brief Check if a Thread is currently enabled
3028  * @param t The Thread to check
3029  * @return True if the Thread is currently enabled
3030  */
3031 bool ModelChecker::is_enabled(Thread *t) const
3032 {
3033         return scheduler->is_enabled(t);
3034 }
3035
3036 /**
3037  * @brief Check if a Thread is currently enabled
3038  * @param tid The ID of the Thread to check
3039  * @return True if the Thread is currently enabled
3040  */
3041 bool ModelChecker::is_enabled(thread_id_t tid) const
3042 {
3043         return scheduler->is_enabled(tid);
3044 }
3045
3046 /**
3047  * Switch from a model-checker context to a user-thread context. This is the
3048  * complement of ModelChecker::switch_to_master and must be called from the
3049  * model-checker context.
3050  *
3051  * @param thread The user-thread to switch to
3052  */
3053 void ModelChecker::switch_from_master(Thread *thread)
3054 {
3055         scheduler->set_current_thread(thread);
3056         Thread::swap(&system_context, thread);
3057 }
3058
3059 /**
3060  * Switch from a user-context to the "master thread" context (a.k.a. system
3061  * context). This switch is made with the intention of exploring a particular
3062  * model-checking action (described by a ModelAction object). Must be called
3063  * from a user-thread context.
3064  *
3065  * @param act The current action that will be explored. May be NULL only if
3066  * the trace is exiting via an assertion (see ModelChecker::set_assert and
3067  * ModelChecker::has_asserted).
3068  * @return Return the value returned by the current action
3069  */
3070 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3071 {
3072         DBG();
3073         Thread *old = thread_current();
3074         scheduler->set_current_thread(NULL);
3075         ASSERT(!old->get_pending());
3076         old->set_pending(act);
3077         if (Thread::swap(old, &system_context) < 0) {
3078                 perror("swap threads");
3079                 exit(EXIT_FAILURE);
3080         }
3081         return old->get_return_value();
3082 }
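/*
 * Illustrative sketch (not part of this file): how a user-thread front-end
 * typically hands an operation to the checker. The wrapper name is
 * hypothetical; the pattern is "build a ModelAction, then yield to the master
 * context and let it choose the result":
 *
 *   uint64_t example_atomic_load(void *obj, memory_order ord)
 *   {
 *           // Blocks this user thread; the model checker picks the value read
 *           return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 */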
3083
3084 /**
3085  * Takes the next step in the execution, if possible.
3086  * @param curr The current step to take
3087  * @return The next Thread to run, if any; NULL if this execution
3088  * should terminate
3089  */
3090 Thread * ModelChecker::take_step(ModelAction *curr)
3091 {
3092         Thread *curr_thrd = get_thread(curr);
3093         ASSERT(curr_thrd->get_state() == THREAD_READY);
3094
3095         ASSERT(check_action_enabled(curr)); /* May have side effects? */
3096         curr = check_current_action(curr);
3097         ASSERT(curr);
3098
3099         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3100                 scheduler->remove_thread(curr_thrd);
3101
3102         return action_select_next_thread(curr);
3103 }
3104
3105 /** Wrapper to run the user's main function, with appropriate arguments */
3106 void user_main_wrapper(void *)
3107 {
3108         user_main(model->params.argc, model->params.argv);
3109 }
3110
3111 /** @return True if the execution has taken too many steps */
3112 bool ModelChecker::too_many_steps() const
3113 {
3114         return params.bound != 0 && priv->used_sequence_numbers > params.bound;
3115 }
3116
3117 bool ModelChecker::should_terminate_execution()
3118 {
3119         /* Infeasible -> don't take any more steps */
3120         if (is_infeasible())
3121                 return true;
3122         else if (isfeasibleprefix() && have_bug_reports()) {
3123                 set_assert();
3124                 return true;
3125         }
3126
3127         if (too_many_steps())
3128                 return true;
3129         return false;
3130 }
3131
3132 /** @brief Run ModelChecker for the user program */
3133 void ModelChecker::run()
3134 {
3135         do {
3136                 thrd_t user_thread;
3137                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3138                 add_thread(t);
3139
3140                 do {
3141                         /*
3142                          * Stash next pending action(s) for thread(s). There
3143                          * should only need to stash one thread's action--the
3144                          * thread which just took a step--plus the first step
3145                          * for any newly-created thread
3146                          */
3147                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3148                                 thread_id_t tid = int_to_id(i);
3149                                 Thread *thr = get_thread(tid);
3150                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3151                                         switch_from_master(thr);
3152                                         if (thr->is_waiting_on(thr))
3153                                                 assert_bug("Deadlock detected (thread %u)", i);
3154                                 }
3155                         }
3156
3157                         /* Don't schedule threads which should be disabled */
3158                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3159                                 Thread *th = get_thread(int_to_id(i));
3160                                 ModelAction *act = th->get_pending();
3161                                 if (act && is_enabled(th) && !check_action_enabled(act)) {
3162                                         scheduler->sleep(th);
3163                                 }
3164                         }
3165
3166                         /* Catch assertions from prior take_step or from
3167                          * between-ModelAction bugs (e.g., data races) */
3168                         if (has_asserted())
3169                                 break;
3170
3171                         if (!t)
3172                                 t = get_next_thread();
3173                         if (!t || t->is_model_thread())
3174                                 break;
3175
3176                         /* Consume the next action for a Thread */
3177                         ModelAction *curr = t->get_pending();
3178                         t->set_pending(NULL);
3179                         t = take_step(curr);
3180                 } while (!should_terminate_execution());
3181
3182                 /*
3183                  * Launch end-of-execution release sequence fixups only when
3184                  * the execution is otherwise feasible AND:
3185                  *
3186                  * (1) there are pending release sequences
3187                  * (2) there are pending assertions that could be invalidated by a
3188                  * change in clock vectors (i.e., data races)
3189                  * (3) there are no pending promises
3190                  */
3191                 while (!pending_rel_seqs->empty() &&
3192                                 is_feasible_prefix_ignore_relseq() &&
3193                                 !unrealizedraces.empty()) {
3194                         model_print("*** WARNING: release sequence fixup action "
3195                                         "(%zu pending release sequence(s)) ***\n",
3196                                         pending_rel_seqs->size());
3197                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3198                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3199                                         model_thread);
3200                         take_step(fixup);
3201                 }
3202         } while (next_execution());
3203
3204         model_print("******* Model-checking complete: *******\n");
3205         print_stats();
3206 }
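/*
 * Illustrative sketch (not part of this file): the shape of the top-level
 * driver implied by the global 'model' pointer and ModelChecker::run().
 * Parameter parsing and snapshot/scheduler setup live elsewhere, so this is
 * only an outline with hypothetical values:
 *
 *   struct model_params params;          // populated from command-line options
 *   params.argc = user_argc;
 *   params.argv = user_argv;
 *   model = new ModelChecker(params);    // global used by user_main_wrapper()
 *   model->run();                        // explores executions until next_execution() is false
 *   delete model;
 */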