model: remove DEBUG action print
[cdsspec-compiler.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "snapshot-interface.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "output.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 ModelChecker *model;
23
24 struct bug_message {
25         bug_message(const char *str) {
26                 const char *fmt = "  [BUG] %s\n";
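                /* Note: fmt's "%s" is consumed by str when formatting, so the
                 * strlen(fmt) + strlen(str) bytes allocated below also cover
                 * the terminating null byte. */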
27                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
28                 sprintf(msg, fmt, str);
29         }
30         ~bug_message() { if (msg) snapshot_free(msg); }
31
32         char *msg;
33         void print() { model_print("%s", msg); }
34
35         SNAPSHOTALLOC
36 };
37
38 /**
39  * Structure for holding small ModelChecker members that should be snapshotted
40  */
41 struct model_snapshot_members {
42         model_snapshot_members() :
43                 /* First thread created will have id INITIAL_THREAD_ID */
44                 next_thread_id(INITIAL_THREAD_ID),
45                 used_sequence_numbers(0),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 no_valid_reads(false),
52                 bad_synchronization(false),
53                 asserted(false)
54         { }
55
56         ~model_snapshot_members() {
57                 for (unsigned int i = 0; i < bugs.size(); i++)
58                         delete bugs[i];
59                 bugs.clear();
60         }
61
62         unsigned int next_thread_id;
63         modelclock_t used_sequence_numbers;
64         ModelAction *next_backtrack;
65         SnapVector<bug_message *> bugs;
66         struct execution_stats stats;
67         bool failed_promise;
68         bool too_many_reads;
69         bool no_valid_reads;
70         /** @brief Incorrectly-ordered synchronization was made */
71         bool bad_synchronization;
72         bool asserted;
73
74         SNAPSHOTALLOC
75 };
76
77 /** @brief Constructor */
78 ModelChecker::ModelChecker(struct model_params params) :
79         /* Initialize default scheduler */
80         params(params),
81         scheduler(new Scheduler()),
82         diverge(NULL),
83         earliest_diverge(NULL),
84         action_trace(new action_list_t()),
85         thread_map(new HashTable<int, Thread *, int>()),
86         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new SnapVector<Promise *>()),
90         futurevalues(new SnapVector<struct PendingFutureValue>()),
91         pending_rel_seqs(new SnapVector<struct release_seq *>()),
92         thrd_last_action(new SnapVector<ModelAction *>(1)),
93         thrd_last_fence_release(new SnapVector<ModelAction *>()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
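/* Usage sketch (illustrative only; the real entry point lives outside this
 * file): the test harness typically constructs a single global instance and
 * then drives it, roughly:
 *
 *   model = new ModelChecker(params);
 *   model->run();   // hypothetical driver call
 *
 * so that user threads can reach the checker through the global `model`
 * pointer declared above. */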
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete condvar_waiters_map;
113         delete action_trace;
114
115         for (unsigned int i = 0; i < promises->size(); i++)
116                 delete (*promises)[i];
117         delete promises;
118
119         delete pending_rel_seqs;
120
121         delete thrd_last_action;
122         delete thrd_last_fence_release;
123         delete node_stack;
124         delete scheduler;
125         delete mo_graph;
126         delete priv;
127 }
128
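/**
 * Look up the action list for a memory location, lazily allocating and
 * registering an empty list the first time the location is seen.
 */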
129 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
130 {
131         action_list_t *tmp = hash->get(ptr);
132         if (tmp == NULL) {
133                 tmp = new action_list_t();
134                 hash->put(ptr, tmp);
135         }
136         return tmp;
137 }
138
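/**
 * Per-thread variant of get_safe_ptr_action(): lazily allocates the vector of
 * per-thread action lists for a memory location.
 */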
139 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
140 {
141         SnapVector<action_list_t> *tmp = hash->get(ptr);
142         if (tmp == NULL) {
143                 tmp = new SnapVector<action_list_t>();
144                 hash->put(ptr, tmp);
145         }
146         return tmp;
147 }
148
149 /**
150  * Restores the user program to its initial state and resets all
151  * model-checker data structures.
152  */
153 void ModelChecker::reset_to_initial_state()
154 {
155         DEBUG("+++ Resetting to initial state +++\n");
156         node_stack->reset_execution();
157
158         /**
159          * FIXME: if we utilize partial rollback, we will need to free only
160          * those pending actions which were NOT pending before the rollback
161          * point
162          */
163         for (unsigned int i = 0; i < get_num_threads(); i++)
164                 delete get_thread(int_to_id(i))->get_pending();
165
166         snapshot_backtrack_before(0);
167 }
168
169 /** @return a thread ID for a new Thread */
170 thread_id_t ModelChecker::get_next_id()
171 {
172         return priv->next_thread_id++;
173 }
174
175 /** @return the number of user threads created during this execution */
176 unsigned int ModelChecker::get_num_threads() const
177 {
178         return priv->next_thread_id;
179 }
180
181 /**
182  * Must be called from user-thread context (e.g., through the global
183  * thread_current() interface)
184  *
185  * @return The currently executing Thread.
186  */
187 Thread * ModelChecker::get_current_thread() const
188 {
189         return scheduler->get_current_thread();
190 }
191
192 /** @return a sequence number for a new ModelAction */
193 modelclock_t ModelChecker::get_next_seq_num()
194 {
195         return ++priv->used_sequence_numbers;
196 }
197
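/** @return The Node at the head of the NodeStack, i.e., the node for the
 *  current position in the explored execution */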
198 Node * ModelChecker::get_curr_node() const
199 {
200         return node_stack->get_head();
201 }
202
203 /**
204  * @brief Select the next thread to execute based on the current action
205  *
206  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
207  * actions should be followed by the execution of their child thread. In either
208  * case, the current action should determine the next thread schedule.
209  *
210  * @param curr The current action
211  * @return The next thread to run, if the current action will determine this
212  * selection; otherwise NULL
213  */
214 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
215 {
216         /* Do not split atomic RMW */
217         if (curr->is_rmwr())
218                 return get_thread(curr);
219         /* Follow CREATE with the created thread */
220         if (curr->get_type() == THREAD_CREATE)
221                 return curr->get_thread_operand();
222         return NULL;
223 }
224
225 /**
226  * @brief Choose the next thread to execute.
227  *
228  * This function chooses the next thread that should execute. It can enforce
229  * execution replay/backtracking or, if the model-checker has no preference
230  * regarding the next thread (i.e., when exploring a new execution ordering),
231  * we defer to the scheduler.
232  *
233  * @return The next thread to run, if any exists; otherwise NULL, indicating
234  * that the current execution should terminate.
235  */
236 Thread * ModelChecker::get_next_thread()
237 {
238         thread_id_t tid;
239
240         /*
241          * Have we completed exploring the preselected path? Then let the
242          * scheduler decide
243          */
244         if (diverge == NULL)
245                 return scheduler->select_next_thread();
246
247         /* Else, we are trying to replay an execution */
248         ModelAction *next = node_stack->get_next()->get_action();
249
250         if (next == diverge) {
251                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
252                         earliest_diverge = diverge;
253
254                 Node *nextnode = next->get_node();
255                 Node *prevnode = nextnode->get_parent();
256                 scheduler->update_sleep_set(prevnode);
257
258                 /* Reached divergence point */
259                 if (nextnode->increment_behaviors()) {
260                         /* Execute the same thread with a new behavior */
261                         tid = next->get_tid();
262                         node_stack->pop_restofstack(2);
263                 } else {
264                         ASSERT(prevnode);
265                         /* Make a different thread execute for next step */
266                         scheduler->add_sleep(get_thread(next->get_tid()));
267                         tid = prevnode->get_next_backtrack();
268                         /* Make sure the backtracked thread isn't sleeping. */
269                         node_stack->pop_restofstack(1);
270                         if (diverge == earliest_diverge) {
271                                 earliest_diverge = prevnode->get_action();
272                         }
273                 }
274                 /* Start the round robin scheduler from this thread id */
275                 scheduler->set_scheduler_thread(tid);
276                 /* The correct sleep set is in the parent node. */
277                 execute_sleep_set();
278
279                 DEBUG("*** Divergence point ***\n");
280
281                 diverge = NULL;
282         } else {
283                 tid = next->get_tid();
284         }
285         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
286         ASSERT(tid != THREAD_ID_T_NONE);
287         return get_thread(id_to_int(tid));
288 }
289
290 /**
291  * We need to know what the next actions of all threads in the sleep
292  * set will be.  This method marks each such thread's pending action with
293  * the sleep flag so that this information is available later.
294  */
295
296 void ModelChecker::execute_sleep_set()
297 {
298         for (unsigned int i = 0; i < get_num_threads(); i++) {
299                 thread_id_t tid = int_to_id(i);
300                 Thread *thr = get_thread(tid);
301                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
302                         thr->get_pending()->set_sleep_flag();
303                 }
304         }
305 }
306
307 /**
308  * @brief Should the current action wake up a given thread?
309  *
310  * @param curr The current action
311  * @param thread The thread that we might wake up
312  * @return True, if we should wake up the sleeping thread; false otherwise
313  */
314 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
315 {
316         const ModelAction *asleep = thread->get_pending();
317         /* Don't allow partial RMW to wake anyone up */
318         if (curr->is_rmwr())
319                 return false;
320         /* Synchronizing actions may have been backtracked */
321         if (asleep->could_synchronize_with(curr))
322                 return true;
323         /* All acquire/release fences and fence-acquire/store-release */
324         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
325                 return true;
326         /* Fence-release + store can awake load-acquire on the same location */
327         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
328                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
329                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
330                         return true;
331         }
332         return false;
333 }
334
335 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
336 {
337         for (unsigned int i = 0; i < get_num_threads(); i++) {
338                 Thread *thr = get_thread(int_to_id(i));
339                 if (scheduler->is_sleep_set(thr)) {
340                         if (should_wake_up(curr, thr))
341                                 /* Remove this thread from sleep set */
342                                 scheduler->remove_sleep(thr);
343                 }
344         }
345 }
346
347 /** @brief Alert the model-checker that an incorrectly-ordered
348  * synchronization was made */
349 void ModelChecker::set_bad_synchronization()
350 {
351         priv->bad_synchronization = true;
352 }
353
354 /**
355  * Check whether the current trace has triggered an assertion which should halt
356  * its execution.
357  *
358  * @return True, if the execution should be aborted; false otherwise
359  */
360 bool ModelChecker::has_asserted() const
361 {
362         return priv->asserted;
363 }
364
365 /**
366  * Trigger a trace assertion which should cause this execution to be halted.
367  * This can be due to a detected bug or due to an infeasibility that should
368  * halt ASAP.
369  */
370 void ModelChecker::set_assert()
371 {
372         priv->asserted = true;
373 }
374
375 /**
376  * Check if we are in a deadlock. Should only be called at the end of an
377  * execution, although it should not give false positives in the middle of an
378  * execution (there should be some ENABLED thread).
379  *
380  * @return True if program is in a deadlock; false otherwise
381  */
382 bool ModelChecker::is_deadlocked() const
383 {
384         bool blocking_threads = false;
385         for (unsigned int i = 0; i < get_num_threads(); i++) {
386                 thread_id_t tid = int_to_id(i);
387                 if (is_enabled(tid))
388                         return false;
389                 Thread *t = get_thread(tid);
390                 if (!t->is_model_thread() && t->get_pending())
391                         blocking_threads = true;
392         }
393         return blocking_threads;
394 }
395
396 /**
397  * Check if this is a complete execution. That is, have all threads completed
398  * execution (rather than exiting because sleep sets have forced a redundant
399  * execution).
400  *
401  * @return True if the execution is complete.
402  */
403 bool ModelChecker::is_complete_execution() const
404 {
405         for (unsigned int i = 0; i < get_num_threads(); i++)
406                 if (is_enabled(int_to_id(i)))
407                         return false;
408         return true;
409 }
410
411 /**
412  * @brief Assert a bug in the executing program.
413  *
414  * Use this function to assert any sort of bug in the user program. If the
415  * current trace is feasible (actually, a prefix of some feasible execution),
416  * then this execution will be aborted, printing the appropriate message. If
417  * the current trace is not yet feasible, the error message will be stashed and
418  * printed if the execution ever becomes feasible.
419  *
420  * @param msg Descriptive message for the bug (do not include newline char)
421  * @return True if bug is immediately-feasible
422  */
423 bool ModelChecker::assert_bug(const char *msg, ...)
424 {
425         char str[800];
426
427         va_list ap;
428         va_start(ap, msg);
429         vsnprintf(str, sizeof(str), msg, ap);
430         va_end(ap);
431
432         priv->bugs.push_back(new bug_message(str));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
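/* Usage sketch (illustrative; `idx' and `len' are hypothetical values checked
 * by a user program):
 *
 *   if (idx >= len)
 *       model->assert_bug("index %d out of bounds (length %d)", idx, len);
 *
 * The message is stashed as a bug_message and the execution is halted only
 * once the current trace is a feasible prefix. */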
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else {
487                 stats.num_redundant++;
488
489                 /**
490                  * @todo We can violate this ASSERT() when fairness/sleep sets
491                  * conflict to cause an execution to terminate, e.g. with:
492                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
493                  */
494                 //ASSERT(scheduler->all_threads_sleeping());
495         }
496 }
497
498 /** @brief Print execution stats */
499 void ModelChecker::print_stats() const
500 {
501         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
502         model_print("Number of redundant executions: %d\n", stats.num_redundant);
503         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
504         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
505         model_print("Total executions: %d\n", stats.num_total);
506         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
507 }
508
509 /**
510  * @brief End-of-execution print
511  * @param printbugs Should any existing bugs be printed?
512  */
513 void ModelChecker::print_execution(bool printbugs) const
514 {
515         print_program_output();
516
517         if (params.verbose) {
518                 model_print("Earliest divergence point since last feasible execution:\n");
519                 if (earliest_diverge)
520                         earliest_diverge->print();
521                 else
522                         model_print("(Not set)\n");
523
524                 model_print("\n");
525                 print_stats();
526         }
527
528         /* Don't print invalid bugs */
529         if (printbugs)
530                 print_bugs();
531
532         model_print("\n");
533         print_summary();
534 }
535
536 /**
537  * Queries the model-checker for more executions to explore and, if one
538  * exists, resets the model-checker state to execute a new execution.
539  *
540  * @return If there are more executions to explore, return true. Otherwise,
541  * return false.
542  */
543 bool ModelChecker::next_execution()
544 {
545         DBG();
546         /* Is this execution a feasible execution that's worth bug-checking? */
547         bool complete = isfeasibleprefix() && (is_complete_execution() ||
548                         have_bug_reports());
549
550         /* End-of-execution bug checks */
551         if (complete) {
552                 if (is_deadlocked())
553                         assert_bug("Deadlock detected");
554
555                 checkDataRaces();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
580
581 /**
582  * @brief Find the last fence-related backtracking conflict for a ModelAction
583  *
584  * This function performs the search for the most recent conflicting action
585  * against which we should perform backtracking, as affected by fence
586  * operations. This includes pairs of potentially-synchronizing actions which
587  * occur due to fence-acquire or fence-release, and hence should be explored in
588  * the opposite execution order.
589  *
590  * @param act The current action
591  * @return The most recent action which conflicts with act due to fences
592  */
593 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
594 {
595         /* Only perform release/acquire fence backtracking for stores */
596         if (!act->is_write())
597                 return NULL;
598
599         /* Find a fence-release (or, act is a release) */
600         ModelAction *last_release;
601         if (act->is_release())
602                 last_release = act;
603         else
604                 last_release = get_last_fence_release(act->get_tid());
605         if (!last_release)
606                 return NULL;
607
608         /* Skip past the release */
609         action_list_t *list = action_trace;
610         action_list_t::reverse_iterator rit;
611         for (rit = list->rbegin(); rit != list->rend(); rit++)
612                 if (*rit == last_release)
613                         break;
614         ASSERT(rit != list->rend());
615
616         /* Find a prior:
617          *   load-acquire
618          * or
619          *   load --sb-> fence-acquire */
620         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
621         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
622         bool found_acquire_fences = false;
623         for ( ; rit != list->rend(); rit++) {
624                 ModelAction *prev = *rit;
625                 if (act->same_thread(prev))
626                         continue;
627
628                 int tid = id_to_int(prev->get_tid());
629
630                 if (prev->is_read() && act->same_var(prev)) {
631                         if (prev->is_acquire()) {
632                                 /* Found most recent load-acquire, don't need
633                                  * to search for more fences */
634                                 if (!found_acquire_fences)
635                                         return NULL;
636                         } else {
637                                 prior_loads[tid] = prev;
638                         }
639                 }
640                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
641                         found_acquire_fences = true;
642                         acquire_fences[tid] = prev;
643                 }
644         }
645
646         ModelAction *latest_backtrack = NULL;
647         for (unsigned int i = 0; i < acquire_fences.size(); i++)
648                 if (acquire_fences[i] && prior_loads[i])
649                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
650                                 latest_backtrack = acquire_fences[i];
651         return latest_backtrack;
652 }
653
654 /**
655  * @brief Find the last backtracking conflict for a ModelAction
656  *
657  * This function performs the search for the most recent conflicting action
658  * against which we should perform backtracking. This primarily includes pairs of
659  * synchronizing actions which should be explored in the opposite execution
660  * order.
661  *
662  * @param act The current action
663  * @return The most recent action which conflicts with act
664  */
665 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
666 {
667         switch (act->get_type()) {
668         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
669         case ATOMIC_READ:
670         case ATOMIC_WRITE:
671         case ATOMIC_RMW: {
672                 ModelAction *ret = NULL;
673
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (prev->could_synchronize_with(act)) {
680                                 ret = prev;
681                                 break;
682                         }
683                 }
684
685                 ModelAction *ret2 = get_last_fence_conflict(act);
686                 if (!ret2)
687                         return ret;
688                 if (!ret)
689                         return ret2;
690                 if (*ret < *ret2)
691                         return ret2;
692                 return ret;
693         }
694         case ATOMIC_LOCK:
695         case ATOMIC_TRYLOCK: {
696                 /* linear search: from most recent to oldest */
697                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
698                 action_list_t::reverse_iterator rit;
699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
700                         ModelAction *prev = *rit;
701                         if (act->is_conflicting_lock(prev))
702                                 return prev;
703                 }
704                 break;
705         }
706         case ATOMIC_UNLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (!act->same_thread(prev) && prev->is_failed_trylock())
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_WAIT: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                         if (!act->same_thread(prev) && prev->is_notify())
726                                 return prev;
727                 }
728                 break;
729         }
730
731         case ATOMIC_NOTIFY_ALL:
732         case ATOMIC_NOTIFY_ONE: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_wait())
739                                 return prev;
740                 }
741                 break;
742         }
743         default:
744                 break;
745         }
746         return NULL;
747 }
748
749 /** This method finds backtracking points at which we should try to
750  * reorder conflicting actions against the given ModelAction.
751  *
752  * @param act The ModelAction for which to find backtracking points.
753  */
754 void ModelChecker::set_backtracking(ModelAction *act)
755 {
756         Thread *t = get_thread(act);
757         ModelAction *prev = get_last_conflict(act);
758         if (prev == NULL)
759                 return;
760
761         Node *node = prev->get_node()->get_parent();
762
763         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
764         int low_tid, high_tid;
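        /* If the conflicting action's thread is enabled at the conflict point,
         * it is the only candidate for reordering; otherwise, conservatively
         * consider every thread. */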
765         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
766                 low_tid = id_to_int(act->get_tid());
767                 high_tid = low_tid + 1;
768         } else {
769                 low_tid = 0;
770                 high_tid = get_num_threads();
771         }
772
773         for (int i = low_tid; i < high_tid; i++) {
774                 thread_id_t tid = int_to_id(i);
775
776                 /* Make sure this thread can be enabled here. */
777                 if (i >= node->get_num_threads())
778                         break;
779
780                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
781                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
782                 if (node->enabled_status(tid) != THREAD_ENABLED)
783                         continue;
784
785                 /* Check if this has been explored already */
786                 if (node->has_been_explored(tid))
787                         continue;
788
789                 /* See if fairness allows */
790                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
791                         bool unfair = false;
792                         for (int t = 0; t < node->get_num_threads(); t++) {
793                                 thread_id_t tother = int_to_id(t);
794                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
795                                         unfair = true;
796                                         break;
797                                 }
798                         }
799                         if (unfair)
800                                 continue;
801                 }
802
803                 /* See if CHESS-like yield fairness allows */
804                 if (model->params.yieldon) {
805                         bool unfair = false;
806                         for (int t = 0; t < node->get_num_threads(); t++) {
807                                 thread_id_t tother = int_to_id(t);
808                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
809                                         unfair = true;
810                                         break;
811                                 }
812                         }
813                         if (unfair)
814                                 continue;
815                 }
816
817                 /* Cache the latest backtracking point */
818                 set_latest_backtrack(prev);
819
820                 /* If this is a new backtracking point, mark the tree */
821                 if (!node->set_backtrack(tid))
822                         continue;
823                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
824                                         id_to_int(prev->get_tid()),
825                                         id_to_int(t->get_id()));
826                 if (DBG_ENABLED()) {
827                         prev->print();
828                         act->print();
829                 }
830         }
831 }
832
833 /**
834  * @brief Cache a backtracking point as the "most recent", if eligible
835  *
836  * Note that this does not prepare the NodeStack for this backtracking
837  * operation; it only caches the action on a per-execution basis.
838  *
839  * @param act The operation at which we should explore a different next action
840  * (i.e., backtracking point)
841  * @return True, if this action is now the most recent backtracking point;
842  * false otherwise
843  */
844 bool ModelChecker::set_latest_backtrack(ModelAction *act)
845 {
846         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
847                 priv->next_backtrack = act;
848                 return true;
849         }
850         return false;
851 }
852
853 /**
854  * Returns last backtracking point. The model checker will explore a different
855  * path for this point in the next execution.
856  * @return The ModelAction at which the next execution should diverge.
857  */
858 ModelAction * ModelChecker::get_next_backtrack()
859 {
860         ModelAction *next = priv->next_backtrack;
861         priv->next_backtrack = NULL;
862         return next;
863 }
864
865 /**
866  * Processes a read model action.
867  * @param curr is the read model action to process.
868  * @return True if processing this read updates the mo_graph.
869  */
870 bool ModelChecker::process_read(ModelAction *curr)
871 {
872         Node *node = curr->get_node();
873         while (true) {
874                 bool updated = false;
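                /* A read obtains its value from a past write, an existing
                 * promise, or a newly created future-value promise */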
875                 switch (node->get_read_from_status()) {
876                 case READ_FROM_PAST: {
877                         const ModelAction *rf = node->get_read_from_past();
878                         ASSERT(rf);
879
880                         mo_graph->startChanges();
881
882                         ASSERT(!is_infeasible());
883                         if (!check_recency(curr, rf)) {
884                                 if (node->increment_read_from()) {
885                                         mo_graph->rollbackChanges();
886                                         continue;
887                                 } else {
888                                         priv->too_many_reads = true;
889                                 }
890                         }
891
892                         updated = r_modification_order(curr, rf);
893                         read_from(curr, rf);
894                         mo_graph->commitChanges();
895                         mo_check_promises(curr, true);
896                         break;
897                 }
898                 case READ_FROM_PROMISE: {
899                         Promise *promise = curr->get_node()->get_read_from_promise();
900                         if (promise->add_reader(curr))
901                                 priv->failed_promise = true;
902                         curr->set_read_from_promise(promise);
903                         mo_graph->startChanges();
904                         if (!check_recency(curr, promise))
905                                 priv->too_many_reads = true;
906                         updated = r_modification_order(curr, promise);
907                         mo_graph->commitChanges();
908                         break;
909                 }
910                 case READ_FROM_FUTURE: {
911                         /* Read from future value */
912                         struct future_value fv = node->get_future_value();
913                         Promise *promise = new Promise(curr, fv);
914                         curr->set_read_from_promise(promise);
915                         promises->push_back(promise);
916                         mo_graph->startChanges();
917                         updated = r_modification_order(curr, promise);
918                         mo_graph->commitChanges();
919                         break;
920                 }
921                 default:
922                         ASSERT(false);
923                 }
924                 get_thread(curr)->set_return_value(curr->get_return_value());
925                 return updated;
926         }
927 }
928
929 /**
930  * Processes a lock, trylock, or unlock model action.  @param curr is
931  * the mutex model action to process.
932  *
933  * The trylock operation checks whether the lock is taken.  If not,
934  * it falls through to the normal lock case.  If so, it returns
935  * failure.
936  *
937  * The lock operation has already been checked that it is enabled, so
938  * it just grabs the lock and synchronizes with the previous unlock.
939  *
940  * The unlock operation has to re-enable all of the threads that are
941  * waiting on the lock.
942  *
943  * @return True if synchronization was updated; false otherwise
944  */
945 bool ModelChecker::process_mutex(ModelAction *curr)
946 {
947         std::mutex *mutex = curr->get_mutex();
948         struct std::mutex_state *state = NULL;
949
950         if (mutex)
951                 state = mutex->get_state();
952
953         switch (curr->get_type()) {
954         case ATOMIC_TRYLOCK: {
955                 bool success = !state->locked;
956                 curr->set_try_lock(success);
957                 if (!success) {
958                         get_thread(curr)->set_return_value(0);
959                         break;
960                 }
961                 get_thread(curr)->set_return_value(1);
962         }
963                 //otherwise fall through to the lock case
964         case ATOMIC_LOCK: {
965                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
966                         assert_bug("Lock access before initialization");
967                 state->locked = get_thread(curr);
968                 ModelAction *unlock = get_last_unlock(curr);
969                 //synchronize with the previous unlock statement
970                 if (unlock != NULL) {
971                         curr->synchronize_with(unlock);
972                         return true;
973                 }
974                 break;
975         }
976         case ATOMIC_WAIT:
977         case ATOMIC_UNLOCK: {
978                 /* wake up the other threads */
979                 for (unsigned int i = 0; i < get_num_threads(); i++) {
980                         Thread *t = get_thread(int_to_id(i));
981                         Thread *curr_thrd = get_thread(curr);
982                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
983                                 scheduler->wake(t);
984                 }
985
986                 /* unlock the lock - after checking who was waiting on it */
987                 state->locked = NULL;
988
989                 if (!curr->is_wait())
990                         break; /* The rest is only for ATOMIC_WAIT */
991
992                 /* Should we go to sleep? (simulate spurious failures) */
993                 if (curr->get_node()->get_misc() == 0) {
994                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
995                         /* disable us */
996                         scheduler->sleep(get_thread(curr));
997                 }
998                 break;
999         }
1000         case ATOMIC_NOTIFY_ALL: {
1001                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1002                 //activate all the waiting threads
1003                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1004                         scheduler->wake(get_thread(*rit));
1005                 }
1006                 waiters->clear();
1007                 break;
1008         }
1009         case ATOMIC_NOTIFY_ONE: {
1010                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1011                 int wakeupthread = curr->get_node()->get_misc();
1012                 action_list_t::iterator it = waiters->begin();
1013                 advance(it, wakeupthread);
1014                 scheduler->wake(get_thread(*it));
1015                 waiters->erase(it);
1016                 break;
1017         }
1018
1019         default:
1020                 ASSERT(0);
1021         }
1022         return false;
1023 }
1024
1025 /**
1026  * @brief Check if the current pending promises allow a future value to be sent
1027  *
1028  * If one of the following is true:
1029  *  (a) there are no pending promises
1030  *  (b) the reader and writer do not cross any promises
1031  * Then, it is safe to pass a future value back now.
1032  *
1033  * Otherwise, we must save the pending future value until (a) or (b) is true
1034  *
1035  * @param writer The operation which sends the future value. Must be a write.
1036  * @param reader The operation which will observe the value. Must be a read.
1037  * @return True if the future value can be sent now; false if it must wait.
1038  */
1039 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1040                 const ModelAction *reader) const
1041 {
1042         if (promises->empty())
1043                 return true;
1044         for (int i = promises->size() - 1; i >= 0; i--) {
1045                 ModelAction *pr = (*promises)[i]->get_reader(0);
1046                 //reader is after promise...doesn't cross any promise
1047                 if (*reader > *pr)
1048                         return true;
1049                 //writer is after promise, reader before...bad...
1050                 if (*writer > *pr)
1051                         return false;
1052         }
1053         return true;
1054 }
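/* Illustration (hypothetical sequence numbers): with an outstanding promised
 * read P at seq 10, a writer W at seq 15 may send its future value to a reader
 * at seq 20 (the reader is past P), but not to a reader at seq 5 (W would then
 * cross P while that reader does not). */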
1055
1056 /**
1057  * @brief Add a future value to a reader
1058  *
1059  * This function performs a few additional checks to ensure that the future
1060  * value can be feasibly observed by the reader
1061  *
1062  * @param writer The operation whose value is sent. Must be a write.
1063  * @param reader The read operation which may read the future value. Must be a read.
1064  */
1065 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1066 {
1067         /* Do more ambitious checks now that mo is more complete */
1068         if (!mo_may_allow(writer, reader))
1069                 return;
1070
1071         Node *node = reader->get_node();
1072
1073         /* Find an ancestor thread which exists at the time of the reader */
1074         Thread *write_thread = get_thread(writer);
1075         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1076                 write_thread = write_thread->get_parent();
1077
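        /* future_value fields, in order: the value written, an expiration clock
         * (the writer's sequence number plus maxfuturedelay), and the id of the
         * ancestor thread located above */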
1078         struct future_value fv = {
1079                 writer->get_write_value(),
1080                 writer->get_seq_number() + params.maxfuturedelay,
1081                 write_thread->get_id(),
1082         };
1083         if (node->add_future_value(fv))
1084                 set_latest_backtrack(reader);
1085 }
1086
1087 /**
1088  * Process a write ModelAction
1089  * @param curr The ModelAction to process
1090  * @return True if the mo_graph was updated or promises were resolved
1091  */
1092 bool ModelChecker::process_write(ModelAction *curr)
1093 {
1094         /* Readers to which we may send our future value */
1095         ModelVector<ModelAction *> send_fv;
1096
1097         const ModelAction *earliest_promise_reader;
1098         bool updated_promises = false;
1099
1100         bool updated_mod_order = w_modification_order(curr, &send_fv);
1101         Promise *promise = pop_promise_to_resolve(curr);
1102
1103         if (promise) {
1104                 earliest_promise_reader = promise->get_reader(0);
1105                 updated_promises = resolve_promise(curr, promise);
1106         } else
1107                 earliest_promise_reader = NULL;
1108
1109         for (unsigned int i = 0; i < send_fv.size(); i++) {
1110                 ModelAction *read = send_fv[i];
1111
1112                 /* Don't send future values to reads after the Promise we resolve */
1113                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1114                         /* Check if future value can be sent immediately */
1115                         if (promises_may_allow(curr, read)) {
1116                                 add_future_value(curr, read);
1117                         } else {
1118                                 futurevalues->push_back(PendingFutureValue(curr, read));
1119                         }
1120                 }
1121         }
1122
1123         /* Check the pending future values */
1124         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1125                 struct PendingFutureValue pfv = (*futurevalues)[i];
1126                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1127                         add_future_value(pfv.writer, pfv.reader);
1128                         futurevalues->erase(futurevalues->begin() + i);
1129                 }
1130         }
1131
1132         mo_graph->commitChanges();
1133         mo_check_promises(curr, false);
1134
1135         get_thread(curr)->set_return_value(VALUE_NONE);
1136         return updated_mod_order || updated_promises;
1137 }
1138
1139 /**
1140  * Process a fence ModelAction
1141  * @param curr The ModelAction to process
1142  * @return True if synchronization was updated
1143  */
1144 bool ModelChecker::process_fence(ModelAction *curr)
1145 {
1146         /*
1147          * fence-relaxed: no-op
1148  *   fence-release: only log the occurrence (not in this function), for
1149          *   use in later synchronization
1150          * fence-acquire (this function): search for hypothetical release
1151          *   sequences
1152          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1153          */
1154         bool updated = false;
1155         if (curr->is_acquire()) {
1156                 action_list_t *list = action_trace;
1157                 action_list_t::reverse_iterator rit;
1158                 /* Find X : is_read(X) && X --sb-> curr */
1159                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1160                         ModelAction *act = *rit;
1161                         if (act == curr)
1162                                 continue;
1163                         if (act->get_tid() != curr->get_tid())
1164                                 continue;
1165                         /* Stop at the beginning of the thread */
1166                         if (act->is_thread_start())
1167                                 break;
1168                         /* Stop once we reach a prior fence-acquire */
1169                         if (act->is_fence() && act->is_acquire())
1170                                 break;
1171                         if (!act->is_read())
1172                                 continue;
1173                         /* read-acquire will find its own release sequences */
1174                         if (act->is_acquire())
1175                                 continue;
1176
1177                         /* Establish hypothetical release sequences */
1178                         rel_heads_list_t release_heads;
1179                         get_release_seq_heads(curr, act, &release_heads);
1180                         for (unsigned int i = 0; i < release_heads.size(); i++)
1181                                 if (!curr->synchronize_with(release_heads[i]))
1182                                         set_bad_synchronization();
1183                         if (release_heads.size() != 0)
1184                                 updated = true;
1185                 }
1186         }
1187         return updated;
1188 }
1189
1190 /**
1191  * @brief Process the current action for thread-related activity
1192  *
1193  * Performs current-action processing for a THREAD_* ModelAction. Processing
1194  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1195  * synchronization, etc.  This function is a no-op for non-THREAD actions
1196  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1197  *
1198  * @param curr The current action
1199  * @return True if synchronization was updated or a thread completed
1200  */
1201 bool ModelChecker::process_thread_action(ModelAction *curr)
1202 {
1203         bool updated = false;
1204
1205         switch (curr->get_type()) {
1206         case THREAD_CREATE: {
1207                 thrd_t *thrd = (thrd_t *)curr->get_location();
1208                 struct thread_params *params = (struct thread_params *)curr->get_value();
1209                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1210                 add_thread(th);
1211                 th->set_creation(curr);
1212                 /* Promises can be satisfied by children */
1213                 for (unsigned int i = 0; i < promises->size(); i++) {
1214                         Promise *promise = (*promises)[i];
1215                         if (promise->thread_is_available(curr->get_tid()))
1216                                 promise->add_thread(th->get_id());
1217                 }
1218                 break;
1219         }
1220         case THREAD_JOIN: {
1221                 Thread *blocking = curr->get_thread_operand();
1222                 ModelAction *act = get_last_action(blocking->get_id());
1223                 curr->synchronize_with(act);
1224                 updated = true; /* trigger rel-seq checks */
1225                 break;
1226         }
1227         case THREAD_FINISH: {
1228                 Thread *th = get_thread(curr);
1229                 /* Wake up any joining threads */
1230                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1231                         Thread *waiting = get_thread(int_to_id(i));
1232                         if (waiting->waiting_on() == th &&
1233                                         waiting->get_pending()->is_thread_join())
1234                                 scheduler->wake(waiting);
1235                 }
1236                 th->complete();
1237                 /* Completed thread can't satisfy promises */
1238                 for (unsigned int i = 0; i < promises->size(); i++) {
1239                         Promise *promise = (*promises)[i];
1240                         if (promise->thread_is_available(th->get_id()))
1241                                 if (promise->eliminate_thread(th->get_id()))
1242                                         priv->failed_promise = true;
1243                 }
1244                 updated = true; /* trigger rel-seq checks */
1245                 break;
1246         }
1247         case THREAD_START: {
1248                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1249                 break;
1250         }
1251         default:
1252                 break;
1253         }
1254
1255         return updated;
1256 }
1257
1258 /**
1259  * @brief Process the current action for release sequence fixup activity
1260  *
1261  * Performs model-checker release sequence fixups for the current action,
1262  * forcing a single pending release sequence to break (with a given, potential
1263  * "loose" write) or to complete (i.e., synchronize). If a pending release
1264  * sequence forms a complete release sequence, then we must perform the fixup
1265  * synchronization, mo_graph additions, etc.
1266  *
1267  * @param curr The current action; must be a release sequence fixup action
1268  * @param work_queue The work queue to which to add work items as they are
1269  * generated
1270  */
1271 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1272 {
1273         const ModelAction *write = curr->get_node()->get_relseq_break();
1274         struct release_seq *sequence = pending_rel_seqs->back();
1275         pending_rel_seqs->pop_back();
1276         ASSERT(sequence);
1277         ModelAction *acquire = sequence->acquire;
1278         const ModelAction *rf = sequence->rf;
1279         const ModelAction *release = sequence->release;
1280         ASSERT(acquire);
1281         ASSERT(release);
1282         ASSERT(rf);
1283         ASSERT(release->same_thread(rf));
1284
1285         if (write == NULL) {
1286                 /**
1287                  * @todo Forcing a synchronization requires that we set
1288                  * modification order constraints. For instance, we can't allow
1289                  * a fixup sequence in which two separate read-acquire
1290                  * operations read from the same sequence, where the first one
1291                  * synchronizes and the other doesn't. Essentially, we can't
1292                  * allow any writes to insert themselves between 'release' and
1293                  * 'rf'
1294                  */
1295
1296                 /* Must synchronize */
1297                 if (!acquire->synchronize_with(release)) {
1298                         set_bad_synchronization();
1299                         return;
1300                 }
1301                 /* Re-check all pending release sequences */
1302                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1303                 /* Re-check act for mo_graph edges */
1304                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1305
1306                 /* propagate synchronization to later actions */
1307                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1308                 for (; (*rit) != acquire; rit++) {
1309                         ModelAction *propagate = *rit;
1310                         if (acquire->happens_before(propagate)) {
1311                                 propagate->synchronize_with(acquire);
1312                                 /* Re-check 'propagate' for mo_graph edges */
1313                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1314                         }
1315                 }
1316         } else {
1317                 /* Break release sequence with new edges:
1318                  *   release --mo--> write --mo--> rf */
1319                 mo_graph->addEdge(release, write);
1320                 mo_graph->addEdge(write, rf);
1321         }
1322
1323         /* See if we have realized a data race */
1324         checkDataRaces();
1325 }
1326
1327 /**
1328  * Initialize the current action by performing one or more of the following
1329  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1330  * in the NodeStack, manipulating backtracking sets, allocating and
1331  * initializing clock vectors, and computing the promises to fulfill.
1332  *
1333  * @param curr The current action, as passed from the user context; may be
1334  * freed/invalidated after the execution of this function, with a different
1335  * action "returned" in its place (pass-by-reference)
1336  * @return True if curr is a newly-explored action; false otherwise
1337  */
1338 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1339 {
1340         ModelAction *newcurr;
1341
1342         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1343                 newcurr = process_rmw(*curr);
1344                 delete *curr;
1345
1346                 if (newcurr->is_rmw())
1347                         compute_promises(newcurr);
1348
1349                 *curr = newcurr;
1350                 return false;
1351         }
1352
1353         (*curr)->set_seq_number(get_next_seq_num());
1354
1355         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1356         if (newcurr) {
1357                 /* First restore type and order in case of RMW operation */
1358                 if ((*curr)->is_rmwr())
1359                         newcurr->copy_typeandorder(*curr);
1360
1361                 ASSERT((*curr)->get_location() == newcurr->get_location());
1362                 newcurr->copy_from_new(*curr);
1363
1364                 /* Discard duplicate ModelAction; use action from NodeStack */
1365                 delete *curr;
1366
1367                 /* Always compute new clock vector */
1368                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1369
1370                 *curr = newcurr;
1371                 return false; /* Action was explored previously */
1372         } else {
1373                 newcurr = *curr;
1374
1375                 /* Always compute new clock vector */
1376                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1377
1378                 /* Assign most recent release fence */
1379                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1380
1381                 /*
1382                  * Perform one-time actions when pushing new ModelAction onto
1383                  * NodeStack
1384                  */
1385                 if (newcurr->is_write())
1386                         compute_promises(newcurr);
1387                 else if (newcurr->is_relseq_fixup())
1388                         compute_relseq_breakwrites(newcurr);
1389                 else if (newcurr->is_wait())
1390                         newcurr->get_node()->set_misc_max(2);
1391                 else if (newcurr->is_notify_one()) {
1392                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1393                 }
1394                 return true; /* This was a new ModelAction */
1395         }
1396 }
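
/*
 * Rough illustration (hypothetical replay; the variable names below are not
 * from this file): on a first execution an action is pushed onto the
 * NodeStack and reported as new; on a replayed execution the stack already
 * holds an equivalent action, so the fresh one is discarded and the stored
 * copy is handed back through the pointer:
 *
 *   ModelAction *a = ...;                    // built in the user context
 *   bool fresh = initialize_curr_action(&a); // may delete and replace 'a'
 *   // first run:  fresh == true,  'a' unchanged
 *   // replay run: fresh == false, 'a' now points at the NodeStack action
 */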
1397
1398 /**
1399  * @brief Establish reads-from relation between two actions
1400  *
1401  * Perform basic operations involved with establishing a concrete rf relation,
1402  * including setting the ModelAction data and checking for release sequences.
1403  *
1404  * @param act The action that is reading (must be a read)
1405  * @param rf The action from which we are reading (must be a write)
1406  *
1407  * @return True if this read established synchronization
1408  */
1409 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1410 {
1411         ASSERT(rf);
1412         ASSERT(rf->is_write());
1413
1414         act->set_read_from(rf);
1415         if (act->is_acquire()) {
1416                 rel_heads_list_t release_heads;
1417                 get_release_seq_heads(act, act, &release_heads);
1418                 int num_heads = release_heads.size();
1419                 for (unsigned int i = 0; i < release_heads.size(); i++)
1420                         if (!act->synchronize_with(release_heads[i])) {
1421                                 set_bad_synchronization();
1422                                 num_heads--;
1423                         }
1424                 return num_heads > 0;
1425         }
1426         return false;
1427 }
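
/*
 * Rough example (hypothetical two-thread trace): a release store followed by
 * an acquire load that reads from it is exactly the case where read_from()
 * reports synchronization:
 *
 *   T2: x.store(1, memory_order_release);   // release head
 *   T1: r = x.load(memory_order_acquire);   // read_from(load, store)
 *
 * The acquire load synchronizes with the head(s) of the store's release
 * sequence, so the reader's clock vector absorbs everything T2 did before
 * the store; a relaxed load of the same store would return false here.
 */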
1428
1429 /**
1430  * Check promises and eliminate potentially-satisfying threads when a thread is
1431  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1432  * no longer satisfy a promise generated from that thread.
1433  *
1434  * @param blocker The thread on which a thread is waiting
1435  * @param waiting The waiting thread
1436  */
1437 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1438 {
1439         for (unsigned int i = 0; i < promises->size(); i++) {
1440                 Promise *promise = (*promises)[i];
1441                 if (!promise->thread_is_available(waiting->get_id()))
1442                         continue;
1443                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1444                         ModelAction *reader = promise->get_reader(j);
1445                         if (reader->get_tid() != blocker->get_id())
1446                                 continue;
1447                         if (promise->eliminate_thread(waiting->get_id())) {
1448                                 /* Promise has failed */
1449                                 priv->failed_promise = true;
1450                         } else {
1451                                 /* Only eliminate the 'waiting' thread once */
1452                                 return;
1453                         }
1454                 }
1455         }
1456 }
1457
1458 /**
1459  * @brief Check whether a model action is enabled.
1460  *
1461  * Checks whether a lock or join operation would be successful (i.e., the
1462  * lock is not already held, or the joined thread has already completed). If
1463  * not, put the action in a waiter list.
1464  *
1465  * @param curr The ModelAction to check
1466  * @return True if the action is enabled; false otherwise
1467  */
1468 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1469         if (curr->is_lock()) {
1470                 std::mutex *lock = curr->get_mutex();
1471                 struct std::mutex_state *state = lock->get_state();
1472                 if (state->locked)
1473                         return false;
1474         } else if (curr->is_thread_join()) {
1475                 Thread *blocking = curr->get_thread_operand();
1476                 if (!blocking->is_complete()) {
1477                         thread_blocking_check_promises(blocking, get_thread(curr));
1478                         return false;
1479                 }
1480         }
1481
1482         return true;
1483 }
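
/*
 * Sketch of the two cases (values hypothetical): a lock on a mutex whose
 * state->locked field is already set is reported as disabled until the owner
 * unlocks; a THREAD_JOIN on a still-running thread is likewise disabled, and
 * in that case we also prune any promises the waiting thread could no longer
 * satisfy (see thread_blocking_check_promises() above).
 */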
1484
1485 /**
1486  * This is the heart of the model checker routine. It performs model-checking
1487  * actions corresponding to a given "current action." Among other processes, it
1488  * calculates reads-from relationships, updates synchronization clock vectors,
1489  * forms a memory_order constraints graph, and handles replay/backtrack
1490  * execution when running permutations of previously-observed executions.
1491  *
1492  * @param curr The current action to process
1493  * @return The ModelAction that is actually executed; may be different than
1494  * curr; may be NULL, if the current action is not enabled to run
1495  */
1496 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1497 {
1498         ASSERT(curr);
1499         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1500         bool newly_explored = initialize_curr_action(&curr);
1501
1502         DBG();
1503
1504         wake_up_sleeping_actions(curr);
1505
1506         /* Compute fairness information for CHESS yield algorithm */
1507         if (model->params.yieldon) {
1508                 curr->get_node()->update_yield(scheduler);
1509         }
1510
1511         /* Add the action to lists before any other model-checking tasks */
1512         if (!second_part_of_rmw)
1513                 add_action_to_lists(curr);
1514
1515         /* Build may_read_from set for newly-created actions */
1516         if (newly_explored && curr->is_read())
1517                 build_may_read_from(curr);
1518
1519         /* Initialize work_queue with the "current action" work */
1520         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1521         while (!work_queue.empty() && !has_asserted()) {
1522                 WorkQueueEntry work = work_queue.front();
1523                 work_queue.pop_front();
1524
1525                 switch (work.type) {
1526                 case WORK_CHECK_CURR_ACTION: {
1527                         ModelAction *act = work.action;
1528                         bool update = false; /* update this location's release seq's */
1529                         bool update_all = false; /* update all release seq's */
1530
1531                         if (process_thread_action(curr))
1532                                 update_all = true;
1533
1534                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1535                                 update = true;
1536
1537                         if (act->is_write() && process_write(act))
1538                                 update = true;
1539
1540                         if (act->is_fence() && process_fence(act))
1541                                 update_all = true;
1542
1543                         if (act->is_mutex_op() && process_mutex(act))
1544                                 update_all = true;
1545
1546                         if (act->is_relseq_fixup())
1547                                 process_relseq_fixup(curr, &work_queue);
1548
1549                         if (update_all)
1550                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1551                         else if (update)
1552                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1553                         break;
1554                 }
1555                 case WORK_CHECK_RELEASE_SEQ:
1556                         resolve_release_sequences(work.location, &work_queue);
1557                         break;
1558                 case WORK_CHECK_MO_EDGES: {
1559                         /** @todo Complete verification of work_queue */
1560                         ModelAction *act = work.action;
1561                         bool updated = false;
1562
1563                         if (act->is_read()) {
1564                                 const ModelAction *rf = act->get_reads_from();
1565                                 const Promise *promise = act->get_reads_from_promise();
1566                                 if (rf) {
1567                                         if (r_modification_order(act, rf))
1568                                                 updated = true;
1569                                 } else if (promise) {
1570                                         if (r_modification_order(act, promise))
1571                                                 updated = true;
1572                                 }
1573                         }
1574                         if (act->is_write()) {
1575                                 if (w_modification_order(act, NULL))
1576                                         updated = true;
1577                         }
1578                         mo_graph->commitChanges();
1579
1580                         if (updated)
1581                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1582                         break;
1583                 }
1584                 default:
1585                         ASSERT(false);
1586                         break;
1587                 }
1588         }
1589
1590         check_curr_backtracking(curr);
1591         set_backtracking(curr);
1592         return curr;
1593 }
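
/*
 * Work-queue flow, roughly (illustrative only): processing one action can
 * fan out into follow-up checks instead of recursing, e.g.
 *
 *   WORK_CHECK_CURR_ACTION(store)
 *     process_write() adds mo edges      -> WORK_CHECK_RELEASE_SEQ(location)
 *   WORK_CHECK_RELEASE_SEQ(location)
 *     new synchronization on an acquire  -> WORK_CHECK_MO_EDGES(acquire)
 *   WORK_CHECK_MO_EDGES(acquire)
 *     more mo edges                      -> WORK_CHECK_RELEASE_SEQ(location)
 *
 * The loop simply drains the queue, stopping early once has_asserted().
 */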
1594
1595 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1596 {
1597         Node *currnode = curr->get_node();
1598         Node *parnode = currnode->get_parent();
1599
1600         if ((parnode && !parnode->backtrack_empty()) ||
1601                          !currnode->misc_empty() ||
1602                          !currnode->read_from_empty() ||
1603                          !currnode->promise_empty() ||
1604                          !currnode->relseq_break_empty()) {
1605                 set_latest_backtrack(curr);
1606         }
1607 }
1608
1609 bool ModelChecker::promises_expired() const
1610 {
1611         for (unsigned int i = 0; i < promises->size(); i++) {
1612                 Promise *promise = (*promises)[i];
1613                 if (promise->get_expiration() < priv->used_sequence_numbers)
1614                         return true;
1615         }
1616         return false;
1617 }
1618
1619 /**
1620  * This is the strongest feasibility check available.
1621  * @return whether the current trace (partial or complete) must be a prefix of
1622  * a feasible trace.
1623  */
1624 bool ModelChecker::isfeasibleprefix() const
1625 {
1626         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1627 }
1628
1629 /**
1630  * Print diagnostic information about an infeasible execution
1631  * @param prefix A string to prefix the output with; if NULL, then a default
1632  * message prefix will be provided
1633  */
1634 void ModelChecker::print_infeasibility(const char *prefix) const
1635 {
1636         char buf[100];
1637         char *ptr = buf;
1638         if (mo_graph->checkForCycles())
1639                 ptr += sprintf(ptr, "[mo cycle]");
1640         if (priv->failed_promise)
1641                 ptr += sprintf(ptr, "[failed promise]");
1642         if (priv->too_many_reads)
1643                 ptr += sprintf(ptr, "[too many reads]");
1644         if (priv->no_valid_reads)
1645                 ptr += sprintf(ptr, "[no valid reads-from]");
1646         if (priv->bad_synchronization)
1647                 ptr += sprintf(ptr, "[bad sw ordering]");
1648         if (promises_expired())
1649                 ptr += sprintf(ptr, "[promise expired]");
1650         if (promises->size() != 0)
1651                 ptr += sprintf(ptr, "[unresolved promise]");
1652         if (ptr != buf)
1653                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1654 }
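
/*
 * Example output (hypothetical execution): a trace that both produced a
 * modification-order cycle and broke a promise would print
 *
 *   Infeasible: [mo cycle][failed promise]
 *
 * while a feasible prefix prints nothing, since 'ptr' never advances.
 */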
1655
1656 /**
1657  * Returns whether the current completed trace is feasible, except for pending
1658  * release sequences.
1659  */
1660 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1661 {
1662         return !is_infeasible() && promises->size() == 0;
1663 }
1664
1665 /**
1666  * Check if the current partial trace is infeasible. Does not check any
1667  * end-of-execution flags, which might rule out the execution. Thus, this is
1668  * useful only for ruling an execution as infeasible.
1669  * @return whether the current partial trace is infeasible.
1670  */
1671 bool ModelChecker::is_infeasible() const
1672 {
1673         return mo_graph->checkForCycles() ||
1674                 priv->no_valid_reads ||
1675                 priv->failed_promise ||
1676                 priv->too_many_reads ||
1677                 priv->bad_synchronization ||
1678                 promises_expired();
1679 }
1680
1681 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1682 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1683         ModelAction *lastread = get_last_action(act->get_tid());
1684         lastread->process_rmw(act);
1685         if (act->is_rmw()) {
1686                 if (lastread->get_reads_from())
1687                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1688                 else
1689                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1690                 mo_graph->commitChanges();
1691         }
1692         return lastread;
1693 }
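
/*
 * Sketch of the merge (hypothetical actions): an atomic RMW is modeled as a
 * read half (RMWR) followed by a write half (RMW, or RMWC for a failed
 * compare-exchange). process_rmw() folds the second half into the stored
 * read:
 *
 *   T1: a1 = RMWR on x        // reads-from some write W
 *   T1: a2 = RMW  on x        // process_rmw(a2) returns a1, now an RMW
 *
 * and the addRMWEdge() call keeps a1 ordered directly after W in the
 * modification order, preserving RMW atomicity.
 */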
1694
1695 /**
1696  * A helper function for ModelChecker::check_recency, to check if the current
1697  * thread is able to read from a different write/promise for 'params.maxreads'
1698  * steps and whether that write/promise should become visible (i.e., is
1699  * ordered later in the modification order). This helps model memory liveness.
1700  *
1701  * @param curr The current action. Must be a read.
1702  * @param rf The write/promise from which we plan to read
1703  * @param other_rf The write/promise from which we may read
1704  * @return True if we were able to read from other_rf for params.maxreads steps
1705  */
1706 template <typename T, typename U>
1707 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1708 {
1709         /* Need a different write/promise */
1710         if (other_rf->equals(rf))
1711                 return false;
1712
1713         /* Only look for "newer" writes/promises */
1714         if (!mo_graph->checkReachable(rf, other_rf))
1715                 return false;
1716
1717         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1718         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1719         action_list_t::reverse_iterator rit = list->rbegin();
1720         ASSERT((*rit) == curr);
1721         /* Skip past curr */
1722         rit++;
1723
1724         /* Does this write/promise work for everyone? */
1725         for (int i = 0; i < params.maxreads; i++, rit++) {
1726                 ModelAction *act = *rit;
1727                 if (!act->may_read_from(other_rf))
1728                         return false;
1729         }
1730         return true;
1731 }
1732
1733 /**
1734  * Checks whether a thread has read from the same write or Promise for too many
1735  * times without seeing the effects of a later write/Promise.
1736  *
1737  * Basic idea:
1738  * 1) there must be a different write/promise that we could read from,
1739  * 2) we must have read from the same write/promise in excess of maxreads times,
1740  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1741  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1742  *
1743  * If so, we decide that the execution is no longer feasible.
1744  *
1745  * @param curr The current action. Must be a read.
1746  * @param rf The ModelAction/Promise from which we might read.
1747  * @return True if the read should succeed; false otherwise
1748  */
1749 template <typename T>
1750 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1751 {
1752         if (!params.maxreads)
1753                 return true;
1754
1755         //NOTE: The next check is just an optimization, not strictly necessary.
1756         if (curr->get_node()->get_read_from_past_size() +
1757                         curr->get_node()->get_read_from_promise_size() <= 1)
1758                 return true;
1759
1760         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1761         int tid = id_to_int(curr->get_tid());
1762         ASSERT(tid < (int)thrd_lists->size());
1763         action_list_t *list = &(*thrd_lists)[tid];
1764         action_list_t::reverse_iterator rit = list->rbegin();
1765         ASSERT((*rit) == curr);
1766         /* Skip past curr */
1767         rit++;
1768
1769         action_list_t::reverse_iterator ritcopy = rit;
1770         /* See if we have enough reads from the same value */
1771         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1772                 if (ritcopy == list->rend())
1773                         return true;
1774                 ModelAction *act = *ritcopy;
1775                 if (!act->is_read())
1776                         return true;
1777                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1778                         return true;
1779                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1780                         return true;
1781                 if (act->get_node()->get_read_from_past_size() +
1782                                 act->get_node()->get_read_from_promise_size() <= 1)
1783                         return true;
1784         }
1785         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1786                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1787                 if (should_read_instead(curr, rf, write))
1788                         return false; /* liveness failure */
1789         }
1790         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1791                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1792                 if (should_read_instead(curr, rf, promise))
1793                         return false; /* liveness failure */
1794         }
1795         return true;
1796 }
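
/*
 * Liveness scenario this rules out (rough sketch; assume params.maxreads is
 * small, e.g. 4):
 *
 *   T1: x.store(1, relaxed); x.store(2, relaxed);
 *   T2: while (x.load(relaxed) != 2) ;
 *
 * If T2 keeps reading the value 1 for more than maxreads steps even though
 * the mo-later store of 2 was available every time, check_recency() returns
 * false and the execution is discarded as a liveness failure.
 */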
1797
1798 /**
1799  * Updates the mo_graph with the constraints imposed from the current
1800  * read.
1801  *
1802  * Basic idea is the following: Go through each other thread and find
1803  * the last action that happened before our read.  Two cases:
1804  *
1805  * (1) The action is a write => that write must either occur before
1806  * the write we read from or be the write we read from.
1807  *
1808  * (2) The action is a read => the write that that action read from
1809  * must occur before the write we read from or be the same write.
1810  *
1811  * @param curr The current action. Must be a read.
1812  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1813  * @return True if modification order edges were added; false otherwise
1814  */
1815 template <typename rf_type>
1816 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1817 {
1818         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1819         unsigned int i;
1820         bool added = false;
1821         ASSERT(curr->is_read());
1822
1823         /* Last SC fence in the current thread */
1824         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1825         ModelAction *last_sc_write = NULL;
1826         if (curr->is_seqcst())
1827                 last_sc_write = get_last_seq_cst_write(curr);
1828
1829         /* Iterate over all threads */
1830         for (i = 0; i < thrd_lists->size(); i++) {
1831                 /* Last SC fence in thread i */
1832                 ModelAction *last_sc_fence_thread_local = NULL;
1833                 if (int_to_id((int)i) != curr->get_tid())
1834                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1835
1836                 /* Last SC fence in thread i, before last SC fence in current thread */
1837                 ModelAction *last_sc_fence_thread_before = NULL;
1838                 if (last_sc_fence_local)
1839                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1840
1841                 /* Iterate over actions in thread, starting from most recent */
1842                 action_list_t *list = &(*thrd_lists)[i];
1843                 action_list_t::reverse_iterator rit;
1844                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1845                         ModelAction *act = *rit;
1846
1847                         /* Skip curr */
1848                         if (act == curr)
1849                                 continue;
1850                         /* Don't want to add reflexive edges on 'rf' */
1851                         if (act->equals(rf)) {
1852                                 if (act->happens_before(curr))
1853                                         break;
1854                                 else
1855                                         continue;
1856                         }
1857
1858                         if (act->is_write()) {
1859                                 /* C++, Section 29.3 statement 5 */
1860                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1861                                                 *act < *last_sc_fence_thread_local) {
1862                                         added = mo_graph->addEdge(act, rf) || added;
1863                                         break;
1864                                 }
1865                                 /* C++, Section 29.3 statement 4 */
1866                                 else if (act->is_seqcst() && last_sc_fence_local &&
1867                                                 *act < *last_sc_fence_local) {
1868                                         added = mo_graph->addEdge(act, rf) || added;
1869                                         break;
1870                                 }
1871                                 /* C++, Section 29.3 statement 6 */
1872                                 else if (last_sc_fence_thread_before &&
1873                                                 *act < *last_sc_fence_thread_before) {
1874                                         added = mo_graph->addEdge(act, rf) || added;
1875                                         break;
1876                                 }
1877                         }
1878
1879                         /* C++, Section 29.3 statement 3 (second subpoint) */
1880                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1881                                 added = mo_graph->addEdge(act, rf) || added;
1882                                 break;
1883                         }
1884
1885                         /*
1886                          * Include at most one act per-thread that "happens
1887                          * before" curr
1888                          */
1889                         if (act->happens_before(curr)) {
1890                                 if (act->is_write()) {
1891                                         added = mo_graph->addEdge(act, rf) || added;
1892                                 } else {
1893                                         const ModelAction *prevrf = act->get_reads_from();
1894                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1895                                         if (prevrf) {
1896                                                 if (!prevrf->equals(rf))
1897                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1898                                         } else if (!prevrf_promise->equals(rf)) {
1899                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1900                                         }
1901                                 }
1902                                 break;
1903                         }
1904                 }
1905         }
1906
1907         /*
1908          * All compatible, thread-exclusive promises must be ordered after any
1909          * concrete loads from the same thread
1910          */
1911         for (unsigned int i = 0; i < promises->size(); i++)
1912                 if ((*promises)[i]->is_compatible_exclusive(curr))
1913                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1914
1915         return added;
1916 }
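
/*
 * Example of an edge a read can force (hypothetical interleaving):
 *
 *   T1: Wa: x.store(1, relaxed);
 *       R : r = x.load(relaxed);    // reads-from Wb
 *   T2: Wb: x.store(2, relaxed);
 *
 * Wa is sequenced-before R, so case (1) applies and we add Wa --mo--> Wb:
 * a write that happens before the read cannot be ordered after the write
 * the read observed.
 */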
1917
1918 /**
1919  * Updates the mo_graph with the constraints imposed from the current write.
1920  *
1921  * Basic idea is the following: Go through each other thread and find
1922  * the latest action that happened before our write.  Two cases:
1923  *
1924  * (1) The action is a write => that write must occur before
1925  * the current write
1926  *
1927  * (2) The action is a read => the write that that action read from
1928  * must occur before the current write.
1929  *
1930  * This method also handles two other issues:
1931  *
1932  * (I) Sequential Consistency: Making sure that if the current write is
1933  * seq_cst, it occurs after the previous seq_cst write.
1934  *
1935  * (II) Sending the write back to non-synchronizing reads.
1936  *
1937  * @param curr The current action. Must be a write.
1938  * @param send_fv A vector for stashing reads to which we may pass our future
1939  * value. If NULL, then don't record any future values.
1940  * @return True if modification order edges were added; false otherwise
1941  */
1942 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1943 {
1944         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1945         unsigned int i;
1946         bool added = false;
1947         ASSERT(curr->is_write());
1948
1949         if (curr->is_seqcst()) {
1950                 /* We have to at least see the last sequentially consistent write,
1951                          so we are initialized. */
1952                  * so we are initialized. */
1953                 if (last_seq_cst != NULL) {
1954                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1955                 }
1956         }
1957
1958         /* Last SC fence in the current thread */
1959         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1960
1961         /* Iterate over all threads */
1962         for (i = 0; i < thrd_lists->size(); i++) {
1963                 /* Last SC fence in thread i, before last SC fence in current thread */
1964                 ModelAction *last_sc_fence_thread_before = NULL;
1965                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1966                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1967
1968                 /* Iterate over actions in thread, starting from most recent */
1969                 action_list_t *list = &(*thrd_lists)[i];
1970                 action_list_t::reverse_iterator rit;
1971                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1972                         ModelAction *act = *rit;
1973                         if (act == curr) {
1974                                 /*
1975                                  * 1) If RMW and it actually read from something, then we
1976                                  * already have all relevant edges, so just skip to next
1977                                  * thread.
1978                                  *
1979                                  * 2) If RMW and it didn't read from anything, we should add
1980                                  * whatever edge we can get to speed up convergence.
1981                                  *
1982                                  * 3) If normal write, we need to look at earlier actions, so
1983                                  * continue processing list.
1984                                  */
1985                                 if (curr->is_rmw()) {
1986                                         if (curr->get_reads_from() != NULL)
1987                                                 break;
1988                                         else
1989                                                 continue;
1990                                 } else
1991                                         continue;
1992                         }
1993
1994                         /* C++, Section 29.3 statement 7 */
1995                         if (last_sc_fence_thread_before && act->is_write() &&
1996                                         *act < *last_sc_fence_thread_before) {
1997                                 added = mo_graph->addEdge(act, curr) || added;
1998                                 break;
1999                         }
2000
2001                         /*
2002                          * Include at most one act per-thread that "happens
2003                          * before" curr
2004                          */
2005                         if (act->happens_before(curr)) {
2006                                 /*
2007                                  * Note: if act is RMW, just add edge:
2008                                  *   act --mo--> curr
2009                                  * The following edge should be handled elsewhere:
2010                                  *   readfrom(act) --mo--> act
2011                                  */
2012                                 if (act->is_write())
2013                                         added = mo_graph->addEdge(act, curr) || added;
2014                                 else if (act->is_read()) {
2015                                         //if the previous read has no concrete reads-from (e.g., it reads from a promise), just keep going
2016                                         if (act->get_reads_from() == NULL)
2017                                                 continue;
2018                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2019                                 }
2020                                 break;
2021                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2022                                                      !act->same_thread(curr)) {
2023                                 /* We have an action that:
2024                                    (1) did not happen before us
2025                                    (2) is a read and we are a write
2026                                    (3) cannot synchronize with us
2027                                    (4) is in a different thread
2028                                    =>
2029                                    that read could potentially read from our write.  Note that
2030                                    these checks are overly conservative at this point; we'll
2031                                    do more checks before actually removing the
2032                                    pendingfuturevalue.
2033
2034                                  */
2035                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2036                                         if (!is_infeasible())
2037                                                 send_fv->push_back(act);
2038                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2039                                                 add_future_value(curr, act);
2040                                 }
2041                         }
2042                 }
2043         }
2044
2045         /*
2046          * All compatible, thread-exclusive promises must be ordered after any
2047          * concrete stores to the same thread, or else they can be merged with
2048          * this store later
2049          */
2050         for (unsigned int i = 0; i < promises->size(); i++)
2051                 if ((*promises)[i]->is_compatible_exclusive(curr))
2052                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2053
2054         return added;
2055 }
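
/*
 * Example of an edge a write can force (hypothetical interleaving):
 *
 *   T1: R : r = x.load(relaxed);    // reads-from Wa
 *       Wb: x.store(2, relaxed);
 *   T2: Wa: x.store(1, relaxed);
 *
 * R happens before Wb, so case (2) applies and we add Wa --mo--> Wb: the
 * write a prior read observed cannot be mod-ordered after our write. If Wb
 * were seq_cst, it would additionally be ordered after the last seq_cst
 * write to x.
 */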
2056
2057 /** Arbitrary reads from the future are not allowed.  Section 29.3
2058  * part 9 places some constraints.  This method checks one result of that
2059  * constraint.  Others require compiler support. */
2060 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2061 {
2062         if (!writer->is_rmw())
2063                 return true;
2064
2065         if (!reader->is_rmw())
2066                 return true;
2067
2068         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2069                 if (search == reader)
2070                         return false;
2071                 if (search->get_tid() == reader->get_tid() &&
2072                                 search->happens_before(reader))
2073                         break;
2074         }
2075
2076         return true;
2077 }
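
/*
 * What this rejects, roughly (hypothetical RMW chain): if 'reader' already
 * appears in the reads-from history of 'writer',
 *
 *   writer --reads-from--> ... --reads-from--> reader    (existing chain)
 *   reader --reads-from--> writer                        (proposed future value)
 *
 * then offering writer's value to reader would close an out-of-thin-air
 * style cycle among RMWs, so the future value is withheld.
 */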
2078
2079 /**
2080  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2081  * some constraints. This method checks the following constraint (others
2082  * require compiler support):
2083  *
2084  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2085  */
2086 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2087 {
2088         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2089         unsigned int i;
2090         /* Iterate over all threads */
2091         for (i = 0; i < thrd_lists->size(); i++) {
2092                 const ModelAction *write_after_read = NULL;
2093
2094                 /* Iterate over actions in thread, starting from most recent */
2095                 action_list_t *list = &(*thrd_lists)[i];
2096                 action_list_t::reverse_iterator rit;
2097                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2098                         ModelAction *act = *rit;
2099
2100                         /* Don't disallow due to act == reader */
2101                         if (!reader->happens_before(act) || reader == act)
2102                                 break;
2103                         else if (act->is_write())
2104                                 write_after_read = act;
2105                         else if (act->is_read() && act->get_reads_from() != NULL)
2106                                 write_after_read = act->get_reads_from();
2107                 }
2108
2109                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2110                         return false;
2111         }
2112         return true;
2113 }
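
/*
 * Concrete shape of the rule (hypothetical actions X, Y, Z): if the reader X
 * happens before some write Y (or a read that observed Y), and Y is already
 * mod-ordered before the candidate writer Z,
 *
 *   X --hb--> Y --mo--> Z
 *
 * then X must not read from Z; mo_may_allow(Z, X) returns false and the
 * future value is not sent.
 */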
2114
2115 /**
2116  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2117  * The ModelAction under consideration is expected to be taking part in
2118  * release/acquire synchronization as an object of the "reads from" relation.
2119  * Note that this can only provide release sequence support for RMW chains
2120  * which do not read from the future, as those actions cannot be traced until
2121  * their "promise" is fulfilled. Similarly, we may not even establish the
2122  * presence of a release sequence with certainty, as some modification order
2123  * constraints may be decided further in the future. Thus, this function
2124  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2125  * and a boolean representing certainty.
2126  *
2127  * @param rf The action that might be part of a release sequence. Must be a
2128  * write.
2129  * @param release_heads A pass-by-reference style return parameter. After
2130  * execution of this function, release_heads will contain the heads of all the
2131  * relevant release sequences, if any exist with certainty
2132  * @param pending A pass-by-reference style return parameter which is only used
2133  * when returning false (i.e., uncertain). Returns the available information regarding
2134  * an uncertain release sequence, including any write operations that might
2135  * break the sequence.
2136  * @return true, if the ModelChecker is certain that release_heads is complete;
2137  * false otherwise
2138  */
2139 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2140                 rel_heads_list_t *release_heads,
2141                 struct release_seq *pending) const
2142 {
2143         /* Only check for release sequences if there are no cycles */
2144         if (mo_graph->checkForCycles())
2145                 return false;
2146
2147         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2148                 ASSERT(rf->is_write());
2149
2150                 if (rf->is_release())
2151                         release_heads->push_back(rf);
2152                 else if (rf->get_last_fence_release())
2153                         release_heads->push_back(rf->get_last_fence_release());
2154                 if (!rf->is_rmw())
2155                         break; /* End of RMW chain */
2156
2157                 /** @todo Need to be smarter here...  In the linux lock
2158                  * example, this will run to the beginning of the program for
2159                  * every acquire. */
2160                 /** @todo The way to be smarter here is to keep going until 1
2161                  * thread has a release preceded by an acquire and you've seen
2162                  *       both. */
2163
2164                 /* acq_rel RMW is a sufficient stopping condition */
2165                 if (rf->is_acquire() && rf->is_release())
2166                         return true; /* complete */
2167         };
2168         if (!rf) {
2169                 /* read from future: need to settle this later */
2170                 pending->rf = NULL;
2171                 return false; /* incomplete */
2172         }
2173
2174         if (rf->is_release())
2175                 return true; /* complete */
2176
2177         /* else relaxed write
2178          * - check for fence-release in the same thread (29.8, stmt. 3)
2179          * - check modification order for contiguous subsequence
2180          *   -> rf must be same thread as release */
2181
2182         const ModelAction *fence_release = rf->get_last_fence_release();
2183         /* Synchronize with a fence-release unconditionally; we don't need to
2184          * find any more "contiguous subsequence..." for it */
2185         if (fence_release)
2186                 release_heads->push_back(fence_release);
2187
2188         int tid = id_to_int(rf->get_tid());
2189         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2190         action_list_t *list = &(*thrd_lists)[tid];
2191         action_list_t::const_reverse_iterator rit;
2192
2193         /* Find rf in the thread list */
2194         rit = std::find(list->rbegin(), list->rend(), rf);
2195         ASSERT(rit != list->rend());
2196
2197         /* Find the last {write,fence}-release */
2198         for (; rit != list->rend(); rit++) {
2199                 if (fence_release && *(*rit) < *fence_release)
2200                         break;
2201                 if ((*rit)->is_release())
2202                         break;
2203         }
2204         if (rit == list->rend()) {
2205                 /* No write-release in this thread */
2206                 return true; /* complete */
2207         } else if (fence_release && *(*rit) < *fence_release) {
2208                 /* The fence-release is more recent (and so, "stronger") than
2209                  * the most recent write-release */
2210                 return true; /* complete */
2211         } /* else, need to establish contiguous release sequence */
2212         ModelAction *release = *rit;
2213
2214         ASSERT(rf->same_thread(release));
2215
2216         pending->writes.clear();
2217
2218         bool certain = true;
2219         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2220                 if (id_to_int(rf->get_tid()) == (int)i)
2221                         continue;
2222                 list = &(*thrd_lists)[i];
2223
2224                 /* Can we ensure no future writes from this thread may break
2225                  * the release seq? */
2226                 bool future_ordered = false;
2227
2228                 ModelAction *last = get_last_action(int_to_id(i));
2229                 Thread *th = get_thread(int_to_id(i));
2230                 if ((last && rf->happens_before(last)) ||
2231                                 !is_enabled(th) ||
2232                                 th->is_complete())
2233                         future_ordered = true;
2234
2235                 ASSERT(!th->is_model_thread() || future_ordered);
2236
2237                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2238                         const ModelAction *act = *rit;
2239                         /* Reach synchronization -> this thread is complete */
2240                         if (act->happens_before(release))
2241                                 break;
2242                         if (rf->happens_before(act)) {
2243                                 future_ordered = true;
2244                                 continue;
2245                         }
2246
2247                         /* Only non-RMW writes can break release sequences */
2248                         if (!act->is_write() || act->is_rmw())
2249                                 continue;
2250
2251                         /* Check modification order */
2252                         if (mo_graph->checkReachable(rf, act)) {
2253                                 /* rf --mo--> act */
2254                                 future_ordered = true;
2255                                 continue;
2256                         }
2257                         if (mo_graph->checkReachable(act, release))
2258                                 /* act --mo--> release */
2259                                 break;
2260                         if (mo_graph->checkReachable(release, act) &&
2261                                       mo_graph->checkReachable(act, rf)) {
2262                                 /* release --mo-> act --mo--> rf */
2263                                 return true; /* complete */
2264                         }
2265                         /* act may break release sequence */
2266                         pending->writes.push_back(act);
2267                         certain = false;
2268                 }
2269                 if (!future_ordered)
2270                         certain = false; /* This thread is uncertain */
2271         }
2272
2273         if (certain) {
2274                 release_heads->push_back(release);
2275                 pending->writes.clear();
2276         } else {
2277                 pending->release = release;
2278                 pending->rf = rf;
2279         }
2280         return certain;
2281 }
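
/*
 * Contiguity sketch (hypothetical trace): for an acquire that reads from a
 * relaxed store Wrlx that follows a release store in the same thread,
 *
 *   T1: Wrel: x.store(1, release);   // candidate release head
 *       Wrlx: x.store(2, relaxed);   // the rf of the acquire load
 *   T2: W?  : x.store(3, relaxed);
 *
 * W? breaks the sequence only if it falls between Wrel and Wrlx in the
 * modification order. When existing mo edges cannot yet pin down W?'s
 * position, it is recorded in pending->writes and the decision is deferred
 * to the lazy re-check in resolve_release_sequences().
 */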
2282
2283 /**
2284  * An interface for getting the release sequence head(s) with which a
2285  * given ModelAction must synchronize. This function only returns a non-empty
2286  * result when it can locate a release sequence head with certainty. Otherwise,
2287  * it may mark the internal state of the ModelChecker so that it will handle
2288  * the release sequence at a later time, causing @a acquire to update its
2289  * synchronization at some later point in execution.
2290  *
2291  * @param acquire The 'acquire' action that may synchronize with a release
2292  * sequence
2293  * @param read The read action that may read from a release sequence; this may
2294  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2295  * when 'acquire' is a fence-acquire)
2296  * @param release_heads A pass-by-reference return parameter. Will be filled
2297  * with the head(s) of the release sequence(s), if they exist with certainty.
2298  * @see ModelChecker::release_seq_heads
2299  */
2300 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2301                 ModelAction *read, rel_heads_list_t *release_heads)
2302 {
2303         const ModelAction *rf = read->get_reads_from();
2304         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2305         sequence->acquire = acquire;
2306         sequence->read = read;
2307
2308         if (!release_seq_heads(rf, release_heads, sequence)) {
2309                 /* add act to 'lazy checking' list */
2310                 pending_rel_seqs->push_back(sequence);
2311         } else {
2312                 snapshot_free(sequence);
2313         }
2314 }
2315
2316 /**
2317  * Attempt to resolve all stashed operations that might synchronize with a
2318  * release sequence for a given location. This implements the "lazy" portion of
2319  * determining whether or not a release sequence was contiguous, since not all
2320  * modification order information is present at the time an action occurs.
2321  *
2322  * @param location The location/object that should be checked for release
2323  * sequence resolutions. A NULL value means to check all locations.
2324  * @param work_queue The work queue to which to add work items as they are
2325  * generated
2326  * @return True if any updates occurred (new synchronization, new mo_graph
2327  * edges)
2328  */
2329 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2330 {
2331         bool updated = false;
2332         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2333         while (it != pending_rel_seqs->end()) {
2334                 struct release_seq *pending = *it;
2335                 ModelAction *acquire = pending->acquire;
2336                 const ModelAction *read = pending->read;
2337
2338                 /* Only resolve sequences on the given location, if provided */
2339                 if (location && read->get_location() != location) {
2340                         it++;
2341                         continue;
2342                 }
2343
2344                 const ModelAction *rf = read->get_reads_from();
2345                 rel_heads_list_t release_heads;
2346                 bool complete;
2347                 complete = release_seq_heads(rf, &release_heads, pending);
2348                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2349                         if (!acquire->has_synchronized_with(release_heads[i])) {
2350                                 if (acquire->synchronize_with(release_heads[i]))
2351                                         updated = true;
2352                                 else
2353                                         set_bad_synchronization();
2354                         }
2355                 }
2356
2357                 if (updated) {
2358                         /* Re-check all pending release sequences */
2359                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2360                         /* Re-check read-acquire for mo_graph edges */
2361                         if (acquire->is_read())
2362                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2363
2364                         /* propagate synchronization to later actions */
2365                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2366                         for (; (*rit) != acquire; rit++) {
2367                                 ModelAction *propagate = *rit;
2368                                 if (acquire->happens_before(propagate)) {
2369                                         propagate->synchronize_with(acquire);
2370                                         /* Re-check 'propagate' for mo_graph edges */
2371                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2372                                 }
2373                         }
2374                 }
2375                 if (complete) {
2376                         it = pending_rel_seqs->erase(it);
2377                         snapshot_free(pending);
2378                 } else {
2379                         it++;
2380                 }
2381         }
2382
2383         // If we resolved any release sequences (added synchronization), see if we have realized a data race.
2384         checkDataRaces();
2385
2386         return updated;
2387 }
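
/*
 * Lazy resolution in practice (rough sketch): an acquire whose release
 * sequence could not be decided when it executed sits in pending_rel_seqs.
 * Once later writes add the missing mo edges for that location, this pass
 * either completes the synchronization via acquire->synchronize_with(head)
 * (and propagates it to later actions), or determines the sequence was
 * broken; only a fully decided entry is erased and freed.
 */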
2388
2389 /**
2390  * Performs various bookkeeping operations for the current ModelAction. For
2391  * instance, adds action to the per-object, per-thread action vector and to the
2392  * action trace list of all thread actions.
2393  *
2394  * @param act is the ModelAction to add.
2395  */
2396 void ModelChecker::add_action_to_lists(ModelAction *act)
2397 {
2398         int tid = id_to_int(act->get_tid());
2399         ModelAction *uninit = NULL;
2400         int uninit_id = -1;
2401         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2402         if (list->empty() && act->is_atomic_var()) {
2403                 uninit = get_uninitialized_action(act);
2404                 uninit_id = id_to_int(uninit->get_tid());
2405                 list->push_front(uninit);
2406         }
2407         list->push_back(act);
2408
2409         action_trace->push_back(act);
2410         if (uninit)
2411                 action_trace->push_front(uninit);
2412
2413         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2414         if (tid >= (int)vec->size())
2415                 vec->resize(priv->next_thread_id);
2416         (*vec)[tid].push_back(act);
2417         if (uninit)
2418                 (*vec)[uninit_id].push_front(uninit);
2419
2420         if ((int)thrd_last_action->size() <= tid)
2421                 thrd_last_action->resize(get_num_threads());
2422         (*thrd_last_action)[tid] = act;
2423         if (uninit)
2424                 (*thrd_last_action)[uninit_id] = uninit;
2425
2426         if (act->is_fence() && act->is_release()) {
2427                 if ((int)thrd_last_fence_release->size() <= tid)
2428                         thrd_last_fence_release->resize(get_num_threads());
2429                 (*thrd_last_fence_release)[tid] = act;
2430         }
2431
2432         if (act->is_wait()) {
2433                 void *mutex_loc = (void *) act->get_value();
2434                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2435
2436                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2437                 if (tid >= (int)vec->size())
2438                         vec->resize(priv->next_thread_id);
2439                 (*vec)[tid].push_back(act);
2440         }
2441 }
2442
2443 /**
2444  * @brief Get the last action performed by a particular Thread
2445  * @param tid The thread ID of the Thread in question
2446  * @return The last action in the thread
2447  */
2448 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2449 {
2450         int threadid = id_to_int(tid);
2451         if (threadid < (int)thrd_last_action->size())
2452                 return (*thrd_last_action)[id_to_int(tid)];
2453         else
2454                 return NULL;
2455 }
2456
2457 /**
2458  * @brief Get the last fence release performed by a particular Thread
2459  * @param tid The thread ID of the Thread in question
2460  * @return The last fence release in the thread, if one exists; NULL otherwise
2461  */
2462 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2463 {
2464         int threadid = id_to_int(tid);
2465         if (threadid < (int)thrd_last_fence_release->size())
2466                 return (*thrd_last_fence_release)[id_to_int(tid)];
2467         else
2468                 return NULL;
2469 }
2470
2471 /**
2472  * Gets the last memory_order_seq_cst write (in the total global sequence)
2473  * performed on a particular object (i.e., memory location), not including the
2474  * current action.
2475  * @param curr The current ModelAction; also denotes the object location to
2476  * check
2477  * @return The last seq_cst write
2478  */
2479 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2480 {
2481         void *location = curr->get_location();
2482         action_list_t *list = get_safe_ptr_action(obj_map, location);
2483         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2484         action_list_t::reverse_iterator rit;
2485         for (rit = list->rbegin(); (*rit) != curr; rit++)
2486                 ;
2487         rit++; /* Skip past curr */
2488         for ( ; rit != list->rend(); rit++)
2489                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2490                         return *rit;
2491         return NULL;
2492 }
2493
2494 /**
2495  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2496  * performed in a particular thread, prior to a particular fence.
2497  * @param tid The ID of the thread to check
2498  * @param before_fence The fence from which to begin the search; if NULL, then
2499  * search for the most recent fence in the thread.
2500  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2501  */
2502 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2503 {
2504         /* All fences should have NULL location */
2505         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2506         action_list_t::reverse_iterator rit = list->rbegin();
2507
2508         if (before_fence) {
2509                 for (; rit != list->rend(); rit++)
2510                         if (*rit == before_fence)
2511                                 break;
2512
2513                 ASSERT(*rit == before_fence);
2514                 rit++;
2515         }
2516
2517         for (; rit != list->rend(); rit++)
2518                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2519                         return *rit;
2520         return NULL;
2521 }
2522
2523 /**
2524  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2525  * location). This function identifies the mutex according to the current
2526  * action, which is presumed to operate on the same mutex.
2527  * @param curr The current ModelAction; also denotes the object location to
2528  * check
2529  * @return The last unlock operation
2530  */
2531 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2532 {
2533         void *location = curr->get_location();
2534         action_list_t *list = get_safe_ptr_action(obj_map, location);
2535         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2536         action_list_t::reverse_iterator rit;
2537         for (rit = list->rbegin(); rit != list->rend(); rit++)
2538                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2539                         return *rit;
2540         return NULL;
2541 }
2542
2543 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2544 {
2545         ModelAction *parent = get_last_action(tid);
2546         if (!parent)
2547                 parent = get_thread(tid)->get_creation();
2548         return parent;
2549 }
2550
2551 /**
2552  * Returns the clock vector for a given thread.
2553  * @param tid The thread whose clock vector we want
2554  * @return Desired clock vector
2555  */
2556 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2557 {
2558         return get_parent_action(tid)->get_cv();
2559 }
2560
2561 /**
2562  * @brief Find the promise (if any) to resolve for the current action and
2563  * remove it from the pending promise vector
2564  * @param curr The current ModelAction. Should be a write.
2565  * @return The Promise to resolve, if any; otherwise NULL
2566  */
2567 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2568 {
2569         for (unsigned int i = 0; i < promises->size(); i++)
2570                 if (curr->get_node()->get_promise(i)) {
2571                         Promise *ret = (*promises)[i];
2572                         promises->erase(promises->begin() + i);
2573                         return ret;
2574                 }
2575         return NULL;
2576 }
2577
2578 /**
2579  * Resolve a Promise with a current write.
2580  * @param write The ModelAction that is fulfilling Promises
2581  * @param promise The Promise to resolve
2582  * @return True. Resolution failures are reported via the failed_promise flag rather than the return value.
2583  */
2584 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2585 {
2586         ModelVector<ModelAction *> actions_to_check;
2587
2588         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2589                 ModelAction *read = promise->get_reader(i);
2590                 read_from(read, write);
2591                 actions_to_check.push_back(read);
2592         }
2593         /* Make sure the promise's value matches the write's value */
2594         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2595         if (!mo_graph->resolvePromise(promise, write))
2596                 priv->failed_promise = true;
2597
2598         /**
2599          * @todo  It is possible to end up in an inconsistent state, where a
2600          * "resolved" promise may still be referenced if
2601          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2602          *
2603          * Note that the inconsistency only matters when dumping mo_graph to
2604          * file.
2605          *
2606          * delete promise;
2607          */
2608
2609         // Check whether reading these writes has made threads unable to
2610         // resolve promises
2611         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2612                 ModelAction *read = actions_to_check[i];
2613                 mo_check_promises(read, true);
2614         }
2615
2616         return true;
2617 }
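
/*
 * Illustrative sketch (not part of the build): the intended pairing of
 * pop_promise_to_resolve() and resolve_promise() when processing a write
 * 'curr'. Note that failures surface through priv->failed_promise, not the
 * return value.
 *
 *   Promise *promise = pop_promise_to_resolve(curr);
 *   if (promise != NULL)
 *           resolve_promise(curr, promise);
 */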
2618
2619 /**
2620  * Compute the set of promises that could potentially be satisfied by this
2621  * action. Note that the set computation actually appears in the Node, not in
2622  * ModelChecker.
2623  * @param curr The ModelAction that may satisfy promises
2624  */
2625 void ModelChecker::compute_promises(ModelAction *curr)
2626 {
2627         for (unsigned int i = 0; i < promises->size(); i++) {
2628                 Promise *promise = (*promises)[i];
2629                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2630                         continue;
2631
2632                 bool satisfy = true;
2633                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2634                         const ModelAction *act = promise->get_reader(j);
2635                         if (act->happens_before(curr) ||
2636                                         act->could_synchronize_with(curr)) {
2637                                 satisfy = false;
2638                                 break;
2639                         }
2640                 }
2641                 if (satisfy)
2642                         curr->get_node()->set_promise(i);
2643         }
2644 }
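
/*
 * Illustrative note (not part of the build): the indices marked here via
 * Node::set_promise(i) are indices into the 'promises' vector, and they are
 * the same indices consulted later when the write is actually processed:
 *
 *   compute_promises(curr);                  // mark candidate promise indices
 *   ...
 *   if (curr->get_node()->get_promise(i))    // same index i into 'promises'
 *           ...                              // see pop_promise_to_resolve() above
 */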
2645
2646 /** Checks promises in response to a change in a thread's ClockVector. */
2647 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2648 {
2649         for (unsigned int i = 0; i < promises->size(); i++) {
2650                 Promise *promise = (*promises)[i];
2651                 if (!promise->thread_is_available(tid))
2652                         continue;
2653                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2654                         const ModelAction *act = promise->get_reader(j);
2655                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2656                                         merge_cv->synchronized_since(act)) {
2657                                 if (promise->eliminate_thread(tid)) {
2658                                         /* Promise has failed */
2659                                         priv->failed_promise = true;
2660                                         return;
2661                                 }
2662                         }
2663                 }
2664         }
2665 }
2666
2667 void ModelChecker::check_promises_thread_disabled()
2668 {
2669         for (unsigned int i = 0; i < promises->size(); i++) {
2670                 Promise *promise = (*promises)[i];
2671                 if (promise->has_failed()) {
2672                         priv->failed_promise = true;
2673                         return;
2674                 }
2675         }
2676 }
2677
2678 /**
2679  * @brief Checks promises in response to addition to modification order for
2680  * threads.
2681  *
2682  * We test whether threads are still available for satisfying promises after an
2683  * addition to our modification order constraints. Those that are unavailable
2684  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2685  * that promise has failed.
2686  *
2687  * @param act The ModelAction which updated the modification order
2688  * @param is_read_check Should be true if act is a read and we must check for
2689  * updates to the store from which it read (there is a distinction here for
2690  * RMWs, which are both a load and a store)
2691  */
2692 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2693 {
2694         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2695
2696         for (unsigned int i = 0; i < promises->size(); i++) {
2697                 Promise *promise = (*promises)[i];
2698
2699                 // Is this promise on the same location?
2700                 if (!promise->same_location(write))
2701                         continue;
2702
2703                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2704                         const ModelAction *pread = promise->get_reader(j);
2705                         if (!pread->happens_before(act))
2706                                continue;
2707                         if (mo_graph->checkPromise(write, promise)) {
2708                                 priv->failed_promise = true;
2709                                 return;
2710                         }
2711                         break;
2712                 }
2713
2714                 // Don't do any lookups twice for the same thread
2715                 if (!promise->thread_is_available(act->get_tid()))
2716                         continue;
2717
2718                 if (mo_graph->checkReachable(promise, write)) {
2719                         if (mo_graph->checkPromise(write, promise)) {
2720                                 priv->failed_promise = true;
2721                                 return;
2722                         }
2723                 }
2724         }
2725 }
2726
2727 /**
2728  * Compute the set of writes that may break the current pending release
2729  * sequence. This information is extracted from previous release sequence
2730  * calculations.
2731  *
2732  * @param curr The current ModelAction. Must be a release sequence fixup
2733  * action.
2734  */
2735 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2736 {
2737         if (pending_rel_seqs->empty())
2738                 return;
2739
2740         struct release_seq *pending = pending_rel_seqs->back();
2741         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2742                 const ModelAction *write = pending->writes[i];
2743                 curr->get_node()->add_relseq_break(write);
2744         }
2745
2746         /* NULL means don't break the sequence; just synchronize */
2747         curr->get_node()->add_relseq_break(NULL);
2748 }
2749
2750 /**
2751  * Build up an initial set of all past writes that this 'read' action may read
2752  * from, as well as any previously-observed future values that must still be valid.
2753  *
2754  * @param curr is the current ModelAction that we are exploring; it must be a
2755  * 'read' operation.
2756  */
2757 void ModelChecker::build_may_read_from(ModelAction *curr)
2758 {
2759         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2760         unsigned int i;
2761         ASSERT(curr->is_read());
2762
2763         ModelAction *last_sc_write = NULL;
2764
2765         if (curr->is_seqcst())
2766                 last_sc_write = get_last_seq_cst_write(curr);
2767
2768         /* Iterate over all threads */
2769         for (i = 0; i < thrd_lists->size(); i++) {
2770                 /* Iterate over actions in thread, starting from most recent */
2771                 action_list_t *list = &(*thrd_lists)[i];
2772                 action_list_t::reverse_iterator rit;
2773                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2774                         ModelAction *act = *rit;
2775
2776                         /* Only consider 'write' actions */
2777                         if (!act->is_write() || act == curr)
2778                                 continue;
2779
2780                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2781                         bool allow_read = true;
2782
2783                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2784                                 allow_read = false;
2785                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2786                                 allow_read = false;
2787
2788                         if (allow_read) {
2789                                 /* Only add feasible reads */
2790                                 mo_graph->startChanges();
2791                                 r_modification_order(curr, act);
2792                                 if (!is_infeasible())
2793                                         curr->get_node()->add_read_from_past(act);
2794                                 mo_graph->rollbackChanges();
2795                         }
2796
2797                         /* Include at most one act per thread that "happens before" curr */
2798                         if (act->happens_before(curr))
2799                                 break;
2800                 }
2801         }
2802
2803         /* Inherit existing, promised future values */
2804         for (i = 0; i < promises->size(); i++) {
2805                 const Promise *promise = (*promises)[i];
2806                 const ModelAction *promise_read = promise->get_reader(0);
2807                 if (promise_read->same_var(curr)) {
2808                         /* Only add feasible future-values */
2809                         mo_graph->startChanges();
2810                         r_modification_order(curr, promise);
2811                         if (!is_infeasible())
2812                                 curr->get_node()->add_read_from_promise(promise_read);
2813                         mo_graph->rollbackChanges();
2814                 }
2815         }
2816
2817         /* We can only end up with no valid may-read-from set if the execution is doomed */
2818         if (!curr->get_node()->read_from_size()) {
2819                 priv->no_valid_reads = true;
2820                 set_assert();
2821         }
2822
2823         if (DBG_ENABLED()) {
2824                 model_print("Reached read action:\n");
2825                 curr->print();
2826                 model_print("Printing read_from_past\n");
2827                 curr->get_node()->print_read_from_past();
2828                 model_print("End printing read_from_past\n");
2829         }
2830 }
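
/*
 * Illustrative sketch (not part of the build): once build_may_read_from()
 * has run, curr's Node holds every store curr may legally read from, and a
 * reads-from selection step picks one entry from that set. The accessor
 * name get_read_from_past(i) below is an assumed Node lookup, not defined
 * in this file.
 *
 *   build_may_read_from(curr);
 *   if (curr->get_node()->read_from_size() > 0) {
 *           const ModelAction *rf = curr->get_node()->get_read_from_past(0);
 *           // ... attempt to read curr's value from 'rf'
 *   }
 */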
2831
2832 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2833 {
2834         for ( ; write != NULL; write = write->get_reads_from()) {
2835                 /* UNINIT actions don't have a Node, and they never sleep */
2836                 if (write->is_uninitialized())
2837                         return true;
2838                 Node *prevnode = write->get_node()->get_parent();
2839
2840                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2841                 if (write->is_release() && thread_sleep)
2842                         return true;
2843                 if (!write->is_rmw())
2844                         return false;
2845         }
2846         return true;
2847 }
2848
2849 /**
2850  * @brief Get an action representing an uninitialized atomic
2851  *
2852  * This function may create a new UNINIT action or retrieve the one already stashed in the current Node
2853  *
2854  * @param curr The current action, which prompts the creation of an UNINIT action
2855  * @return A pointer to the UNINIT ModelAction
2856  */
2857 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2858 {
2859         Node *node = curr->get_node();
2860         ModelAction *act = node->get_uninit_action();
2861         if (!act) {
2862                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2863                 node->set_uninit_action(act);
2864         }
2865         act->create_cv(NULL);
2866         return act;
2867 }
2868
2869 static void print_list(action_list_t *list)
2870 {
2871         action_list_t::iterator it;
2872
2873         model_print("---------------------------------------------------------------------\n");
2874
2875         unsigned int hash = 0;
2876
2877         for (it = list->begin(); it != list->end(); it++) {
2878                 const ModelAction *act = *it;
2879                 if (act->get_seq_number() > 0)
2880                         act->print();
2881                 hash = hash^(hash<<3)^((*it)->hash());
2882         }
2883         model_print("HASH %u\n", hash);
2884         model_print("---------------------------------------------------------------------\n");
2885 }
2886
2887 #if SUPPORT_MOD_ORDER_DUMP
2888 void ModelChecker::dumpGraph(char *filename) const
2889 {
2890         char buffer[200];
2891         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2892         FILE *file = fopen(buffer, "w");
2893         fprintf(file, "digraph %s {\n", filename);
2894         mo_graph->dumpNodes(file);
2895         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2896
2897         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2898                 ModelAction *act = *it;
2899                 if (act->is_read()) {
2900                         mo_graph->dot_print_node(file, act);
2901                         if (act->get_reads_from())
2902                                 mo_graph->dot_print_edge(file,
2903                                                 act->get_reads_from(),
2904                                                 act,
2905                                                 "label=\"rf\", color=red, weight=2");
2906                         else
2907                                 mo_graph->dot_print_edge(file,
2908                                                 act->get_reads_from_promise(),
2909                                                 act,
2910                                                 "label=\"rf\", color=red");
2911                 }
2912                 if (thread_array[id_to_int(act->get_tid())]) {
2913                         mo_graph->dot_print_edge(file,
2914                                         thread_array[id_to_int(act->get_tid())],
2915                                         act,
2916                                         "label=\"sb\", color=blue, weight=400");
2917                 }
2918
2919                 thread_array[id_to_int(act->get_tid())] = act;
2920         }
2921         fprintf(file, "}\n");
2922         model_free(thread_array);
2923         fclose(file);
2924 }
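
/*
 * For reference, the emitted file is plain Graphviz input, roughly of the
 * following shape (the node names are schematic; the exact node/edge text
 * comes from CycleGraph's dot_print_node()/dot_print_edge() helpers):
 *
 *   digraph graph0001 {
 *           ...                                             // mo_graph nodes/edges
 *           W1 -> R2 [label="rf", color=red, weight=2];     // reads-from
 *           A1 -> A2 [label="sb", color=blue, weight=400];  // sequenced-before
 *   }
 */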
2925 #endif
2926
2927 /** @brief Prints an execution trace summary. */
2928 void ModelChecker::print_summary() const
2929 {
2930 #if SUPPORT_MOD_ORDER_DUMP
2931         char buffername[100];
2932         snprintf(buffername, sizeof(buffername), "exec%04u", stats.num_total);
2933         mo_graph->dumpGraphToFile(buffername);
2934         snprintf(buffername, sizeof(buffername), "graph%04u", stats.num_total);
2935         dumpGraph(buffername);
2936 #endif
2937
2938         model_print("Execution %u:", stats.num_total);
2939         if (isfeasibleprefix()) {
2940                 if (scheduler->all_threads_sleeping())
2941                         model_print(" SLEEP-SET REDUNDANT");
2942                 model_print("\n");
2943         } else
2944                 print_infeasibility(" INFEASIBLE");
2945         print_list(action_trace);
2946         model_print("\n");
2947         if (!promises->empty()) {
2948                 model_print("Pending promises:\n");
2949                 for (unsigned int i = 0; i < promises->size(); i++) {
2950                         model_print(" [P%u] ", i);
2951                         (*promises)[i]->print();
2952                 }
2953                 model_print("\n");
2954         }
2955 }
2956
2957 /**
2958  * Add a Thread to the system for the first time. Should only be called once
2959  * per thread.
2960  * @param t The Thread to add
2961  */
2962 void ModelChecker::add_thread(Thread *t)
2963 {
2964         thread_map->put(id_to_int(t->get_id()), t);
2965         scheduler->add_thread(t);
2966 }
2967
2968 /**
2969  * @brief Get a Thread reference by its ID
2970  * @param tid The Thread's ID
2971  * @return A Thread reference
2972  */
2973 Thread * ModelChecker::get_thread(thread_id_t tid) const
2974 {
2975         return thread_map->get(id_to_int(tid));
2976 }
2977
2978 /**
2979  * @brief Get a reference to the Thread in which a ModelAction was executed
2980  * @param act The ModelAction
2981  * @return A Thread reference
2982  */
2983 Thread * ModelChecker::get_thread(const ModelAction *act) const
2984 {
2985         return get_thread(act->get_tid());
2986 }
2987
2988 /**
2989  * @brief Get a Promise's "promise number"
2990  *
2991  * A "promise number" is an index number that is unique to a promise, valid
2992  * only for a specific snapshot of an execution trace. Promises may come and go
2993  * as they are generated and resolved, so an index only retains meaning for the
2994  * current snapshot.
2995  *
2996  * @param promise The Promise to check
2997  * @return The promise index, if the promise is still valid; otherwise -1
2998  */
2999 int ModelChecker::get_promise_number(const Promise *promise) const
3000 {
3001         for (unsigned int i = 0; i < promises->size(); i++)
3002                 if ((*promises)[i] == promise)
3003                         return i;
3004         /* Not found */
3005         return -1;
3006 }
3007
3008 /**
3009  * @brief Check if a Thread is currently enabled
3010  * @param t The Thread to check
3011  * @return True if the Thread is currently enabled
3012  */
3013 bool ModelChecker::is_enabled(Thread *t) const
3014 {
3015         return scheduler->is_enabled(t);
3016 }
3017
3018 /**
3019  * @brief Check if a Thread is currently enabled
3020  * @param tid The ID of the Thread to check
3021  * @return True if the Thread is currently enabled
3022  */
3023 bool ModelChecker::is_enabled(thread_id_t tid) const
3024 {
3025         return scheduler->is_enabled(tid);
3026 }
3027
3028 /**
3029  * Switch from a model-checker context to a user-thread context. This is the
3030  * complement of ModelChecker::switch_to_master and must be called from the
3031  * model-checker context
3032  *
3033  * @param thread The user-thread to switch to
3034  */
3035 void ModelChecker::switch_from_master(Thread *thread)
3036 {
3037         scheduler->set_current_thread(thread);
3038         Thread::swap(&system_context, thread);
3039 }
3040
3041 /**
3042  * Switch from a user-context to the "master thread" context (a.k.a. system
3043  * context). This switch is made with the intention of exploring a particular
3044  * model-checking action (described by a ModelAction object). Must be called
3045  * from a user-thread context.
3046  *
3047  * @param act The current action that will be explored. May be NULL only if
3048  * trace is exiting via an assertion (see ModelChecker::set_assert and
3049  * ModelChecker::has_asserted).
3050  * @return The value returned by the current action
3051  */
3052 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3053 {
3054         DBG();
3055         Thread *old = thread_current();
3056         scheduler->set_current_thread(NULL);
3057         ASSERT(!old->get_pending());
3058         old->set_pending(act);
3059         if (Thread::swap(old, &system_context) < 0) {
3060                 perror("swap threads");
3061                 exit(EXIT_FAILURE);
3062         }
3063         return old->get_return_value();
3064 }
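
/*
 * Illustrative sketch (not part of the build): from a user-thread context,
 * an operation hands its next step to the model checker by packaging it as
 * a ModelAction and calling switch_to_master() on the global 'model'. The
 * wrapper name below is hypothetical, and the ModelAction construction is
 * abbreviated (value/thread arguments are assumed to default).
 *
 *   uint64_t example_atomic_load(void *obj, std::memory_order order)
 *   {
 *           ModelAction *act = new ModelAction(ATOMIC_READ, order, obj);
 *           return model->switch_to_master(act);
 *   }
 */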
3065
3066 /**
3067  * Takes the next step in the execution, if possible.
3068  * @param curr The current step to take
3069  * @return Returns the next Thread to run, if any; NULL if this execution
3070  * should terminate
3071  */
3072 Thread * ModelChecker::take_step(ModelAction *curr)
3073 {
3074         Thread *curr_thrd = get_thread(curr);
3075         ASSERT(curr_thrd->get_state() == THREAD_READY);
3076
3077         ASSERT(check_action_enabled(curr)); /* May have side effects? */
3078         curr = check_current_action(curr);
3079         ASSERT(curr);
3080
3081         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3082                 scheduler->remove_thread(curr_thrd);
3083
3084         return action_select_next_thread(curr);
3085 }
3086
3087 /** Wrapper to run the user's main function, with appropriate arguments */
3088 void user_main_wrapper(void *)
3089 {
3090         user_main(model->params.argc, model->params.argv);
3091 }
3092
3093 bool ModelChecker::should_terminate_execution()
3094 {
3095         /* Infeasible -> don't take any more steps */
3096         if (is_infeasible())
3097                 return true;
3098         else if (isfeasibleprefix() && have_bug_reports()) {
3099                 set_assert();
3100                 return true;
3101         }
3102
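        /* params.bound == 0 means the execution length is unbounded */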
3103         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3104                 return true;
3105         return false;
3106 }
3107
3108 /** @brief Run ModelChecker for the user program */
3109 void ModelChecker::run()
3110 {
3111         do {
3112                 thrd_t user_thread;
3113                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3114                 add_thread(t);
3115
3116                 do {
3117                         /*
3118                          * Stash next pending action(s) for thread(s). We
3119                          * should only need to stash one thread's action (the
3120                          * thread which just took a step) plus the first step
3121                          * for any newly-created thread
3122                          */
3123                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3124                                 thread_id_t tid = int_to_id(i);
3125                                 Thread *thr = get_thread(tid);
3126                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3127                                         switch_from_master(thr);
3128                                         if (thr->is_waiting_on(thr))
3129                                                 assert_bug("Deadlock detected (thread %u)", i);
3130                                 }
3131                         }
3132
3133                         /* Don't schedule threads which should be disabled */
3134                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3135                                 Thread *th = get_thread(int_to_id(i));
3136                                 ModelAction *act = th->get_pending();
3137                                 if (act && is_enabled(th) && !check_action_enabled(act)) {
3138                                         scheduler->sleep(th);
3139                                 }
3140                         }
3141
3142                         /* Catch assertions from prior take_step or from
3143                          * between-ModelAction bugs (e.g., data races) */
3144                         if (has_asserted())
3145                                 break;
3146
3147                         if (!t)
3148                                 t = get_next_thread();
3149                         if (!t || t->is_model_thread())
3150                                 break;
3151
3152                         /* Consume the next action for a Thread */
3153                         ModelAction *curr = t->get_pending();
3154                         t->set_pending(NULL);
3155                         t = take_step(curr);
3156                 } while (!should_terminate_execution());
3157
3158                 /*
3159                  * Launch end-of-execution release sequence fixups only when
3160                  * the execution is otherwise feasible AND there are:
3161                  *
3162                  * (1) pending release sequences
3163                  * (2) pending assertions that could be invalidated by a change
3164                  * in clock vectors (i.e., data races)
3165                  * (3) no pending promises
3166                  */
3167                 while (!pending_rel_seqs->empty() &&
3168                                 is_feasible_prefix_ignore_relseq() &&
3169                                 !unrealizedraces.empty()) {
3170                         model_print("*** WARNING: release sequence fixup action "
3171                                         "(%zu pending release sequence(s)) ***\n",
3172                                         pending_rel_seqs->size());
3173                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3174                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3175                                         model_thread);
3176                         take_step(fixup);
3177                 }
3178         } while (next_execution());
3179
3180         model_print("******* Model-checking complete: *******\n");
3181         print_stats();
3182 }