common: make model_print() use OS file descriptor, not C library FILE*
[cdsspec-compiler.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
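/*
 * Illustrative note (not part of the original source): constructing
 * bug_message("data race detected") yields the string
 * "  [BUG] data race detected\n", which print() later emits through
 * model_print(). The allocation above is sufficient because the two-character
 * "%s" in fmt is replaced by str, leaving one spare byte beyond the
 * terminating NUL.
 */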
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
88         promises(new SnapVector<Promise *>()),
89         futurevalues(new SnapVector<struct PendingFutureValue>()),
90         pending_rel_seqs(new SnapVector<struct release_seq *>()),
91         thrd_last_action(new SnapVector<ModelAction *>(1)),
92         thrd_last_fence_release(new SnapVector<ModelAction *>()),
93         node_stack(new NodeStack()),
94         priv(new struct model_snapshot_members()),
95         mo_graph(new CycleGraph())
96 {
97         /* Initialize a model-checker thread, for special ModelActions */
98         model_thread = new Thread(get_next_id());
99         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
100 }
101
102 /** @brief Destructor */
103 ModelChecker::~ModelChecker()
104 {
105         for (unsigned int i = 0; i < get_num_threads(); i++)
106                 delete thread_map->get(i);
107         delete thread_map;
108
109         delete obj_thrd_map;
110         delete obj_map;
111         delete condvar_waiters_map;
112         delete action_trace;
113
114         for (unsigned int i = 0; i < promises->size(); i++)
115                 delete (*promises)[i];
116         delete promises;
117
118         delete pending_rel_seqs;
            delete futurevalues;
119
120         delete thrd_last_action;
121         delete thrd_last_fence_release;
122         delete node_stack;
123         delete scheduler;
124         delete mo_graph;
125         delete priv;
126 }
127
128 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
129 {
130         action_list_t *tmp = hash->get(ptr);
131         if (tmp == NULL) {
132                 tmp = new action_list_t();
133                 hash->put(ptr, tmp);
134         }
135         return tmp;
136 }
137
138 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
139 {
140         SnapVector<action_list_t> *tmp = hash->get(ptr);
141         if (tmp == NULL) {
142                 tmp = new SnapVector<action_list_t>();
143                 hash->put(ptr, tmp);
144         }
145         return tmp;
146 }
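/*
 * Usage sketch (not in the original source): both helpers implement a
 * get-or-create lookup so callers never observe a NULL list, e.g. as used
 * later in this file:
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   // 'list' is guaranteed non-NULL, even on the first access to a location
 */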
147
148 /**
149  * Restores user program to initial state and resets all model-checker data
150  * structures.
151  */
152 void ModelChecker::reset_to_initial_state()
153 {
154         DEBUG("+++ Resetting to initial state +++\n");
155         node_stack->reset_execution();
156
157         /**
158          * FIXME: if we utilize partial rollback, we will need to free only
159          * those pending actions which were NOT pending before the rollback
160          * point
161          */
162         for (unsigned int i = 0; i < get_num_threads(); i++)
163                 delete get_thread(int_to_id(i))->get_pending();
164
165         snapshot_backtrack_before(0);
166 }
167
168 /** @return a thread ID for a new Thread */
169 thread_id_t ModelChecker::get_next_id()
170 {
171         return priv->next_thread_id++;
172 }
173
174 /** @return the number of user threads created during this execution */
175 unsigned int ModelChecker::get_num_threads() const
176 {
177         return priv->next_thread_id;
178 }
179
180 /**
181  * Must be called from user-thread context (e.g., through the global
182  * thread_current() interface)
183  *
184  * @return The currently executing Thread.
185  */
186 Thread * ModelChecker::get_current_thread() const
187 {
188         return scheduler->get_current_thread();
189 }
190
191 /** @return a sequence number for a new ModelAction */
192 modelclock_t ModelChecker::get_next_seq_num()
193 {
194         return ++priv->used_sequence_numbers;
195 }
196
197 Node * ModelChecker::get_curr_node() const
198 {
199         return node_stack->get_head();
200 }
201
202 /**
203  * @brief Select the next thread to execute based on the current action
204  *
205  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
206  * actions should be followed by the execution of their child thread. In either
207  * case, the current action should determine the next thread schedule.
208  *
209  * @param curr The current action
210  * @return The next thread to run, if the current action will determine this
211  * selection; otherwise NULL
212  */
213 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
214 {
215         /* Do not split atomic RMW */
216         if (curr->is_rmwr())
217                 return get_thread(curr);
218         /* Follow CREATE with the created thread */
219         if (curr->get_type() == THREAD_CREATE)
220                 return curr->get_thread_operand();
221         return NULL;
222 }
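/*
 * Illustrative schedule fragment (not in the original source):
 *
 *   T1: x.fetch_add(1)            // RMW-read part: T1 must run again next,
 *   T1: (RMW-write part)          // so the RMW is never split by another thread
 *   T1: thrd_create(&t2, ...)     // THREAD_CREATE: the new child T2 runs next
 *   T2: ...
 *
 * For any other action this function returns NULL and the Scheduler decides.
 */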
223
224 /**
225  * @brief Choose the next thread to execute.
226  *
227  * This function chooses the next thread that should execute. It can enforce
228  * execution replay/backtracking or, if the model-checker has no preference
229  * regarding the next thread (i.e., when exploring a new execution ordering),
230  * we defer to the scheduler.
231  *
232  * @return The next chosen thread to run, if any exists; NULL if the current
233  * execution should terminate.
234  */
235 Thread * ModelChecker::get_next_thread()
236 {
237         thread_id_t tid;
238
239         /*
240          * Have we completed exploring the preselected path? Then let the
241          * scheduler decide
242          */
243         if (diverge == NULL)
244                 return scheduler->select_next_thread();
245
246         /* Else, we are trying to replay an execution */
247         ModelAction *next = node_stack->get_next()->get_action();
248
249         if (next == diverge) {
250                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
251                         earliest_diverge = diverge;
252
253                 Node *nextnode = next->get_node();
254                 Node *prevnode = nextnode->get_parent();
255                 scheduler->update_sleep_set(prevnode);
256
257                 /* Reached divergence point */
258                 if (nextnode->increment_behaviors()) {
259                         /* Execute the same thread with a new behavior */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else {
263                         ASSERT(prevnode);
264                         /* Make a different thread execute for next step */
265                         scheduler->add_sleep(get_thread(next->get_tid()));
266                         tid = prevnode->get_next_backtrack();
267                         /* Make sure the backtracked thread isn't sleeping. */
268                         node_stack->pop_restofstack(1);
269                         if (diverge == earliest_diverge) {
270                                 earliest_diverge = prevnode->get_action();
271                         }
272                 }
273                 /* Start the round robin scheduler from this thread id */
274                 scheduler->set_scheduler_thread(tid);
275                 /* The correct sleep set is in the parent node. */
276                 execute_sleep_set();
277
278                 DEBUG("*** Divergence point ***\n");
279
280                 diverge = NULL;
281         } else {
282                 tid = next->get_tid();
283         }
284         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
285         ASSERT(tid != THREAD_ID_T_NONE);
286         return get_thread(id_to_int(tid));
287 }
288
289 /**
290  * We need to know what the next actions of all threads in the sleep
291  * set will be.  This method marks each such thread's pending action with
292  * the sleep flag.
293  */
294
295 void ModelChecker::execute_sleep_set()
296 {
297         for (unsigned int i = 0; i < get_num_threads(); i++) {
298                 thread_id_t tid = int_to_id(i);
299                 Thread *thr = get_thread(tid);
300                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
301                         thr->get_pending()->set_sleep_flag();
302                 }
303         }
304 }
305
306 /**
307  * @brief Should the current action wake up a given thread?
308  *
309  * @param curr The current action
310  * @param thread The thread that we might wake up
311  * @return True, if we should wake up the sleeping thread; false otherwise
312  */
313 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
314 {
315         const ModelAction *asleep = thread->get_pending();
316         /* Don't allow partial RMW to wake anyone up */
317         if (curr->is_rmwr())
318                 return false;
319         /* Synchronizing actions may have been backtracked */
320         if (asleep->could_synchronize_with(curr))
321                 return true;
322         /* All acquire/release fences and fence-acquire/store-release */
323         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
324                 return true;
325         /* Fence-release + store can awake load-acquire on the same location */
326         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
327                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
328                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
329                         return true;
330         }
331         return false;
332 }
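/*
 * Illustrative example (not in the original source) of the last rule above:
 * a sleeping load-acquire can be woken by a plain store to the same location
 * when the storing thread has an earlier fence-release, e.g.
 *
 *   T1 (asleep): r = x.load(memory_order_acquire);
 *   T2: atomic_thread_fence(memory_order_release);
 *   T2: x.store(1, memory_order_relaxed);    // curr; may synchronize via the fence
 */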
333
334 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
335 {
336         for (unsigned int i = 0; i < get_num_threads(); i++) {
337                 Thread *thr = get_thread(int_to_id(i));
338                 if (scheduler->is_sleep_set(thr)) {
339                         if (should_wake_up(curr, thr))
340                                 /* Remove this thread from sleep set */
341                                 scheduler->remove_sleep(thr);
342                 }
343         }
344 }
345
346 /** @brief Alert the model-checker that an incorrectly-ordered
347  * synchronization was made */
348 void ModelChecker::set_bad_synchronization()
349 {
350         priv->bad_synchronization = true;
351 }
352
353 /**
354  * Check whether the current trace has triggered an assertion which should halt
355  * its execution.
356  *
357  * @return True, if the execution should be aborted; false otherwise
358  */
359 bool ModelChecker::has_asserted() const
360 {
361         return priv->asserted;
362 }
363
364 /**
365  * Trigger a trace assertion which should cause this execution to be halted.
366  * This can be due to a detected bug or due to an infeasibility that should
367  * halt ASAP.
368  */
369 void ModelChecker::set_assert()
370 {
371         priv->asserted = true;
372 }
373
374 /**
375  * Check if we are in a deadlock. Should only be called at the end of an
376  * execution, although it should not give false positives in the middle of an
377  * execution (there should be some ENABLED thread).
378  *
379  * @return True if program is in a deadlock; false otherwise
380  */
381 bool ModelChecker::is_deadlocked() const
382 {
383         bool blocking_threads = false;
384         for (unsigned int i = 0; i < get_num_threads(); i++) {
385                 thread_id_t tid = int_to_id(i);
386                 if (is_enabled(tid))
387                         return false;
388                 Thread *t = get_thread(tid);
389                 if (!t->is_model_thread() && t->get_pending())
390                         blocking_threads = true;
391         }
392         return blocking_threads;
393 }
394
395 /**
396  * Check if this is a complete execution. That is, have all threads completed
397  * execution (rather than exiting because sleep sets have forced a redundant
398  * execution).
399  *
400  * @return True if the execution is complete.
401  */
402 bool ModelChecker::is_complete_execution() const
403 {
404         for (unsigned int i = 0; i < get_num_threads(); i++)
405                 if (is_enabled(int_to_id(i)))
406                         return false;
407         return true;
408 }
409
410 /**
411  * @brief Assert a bug in the executing program.
412  *
413  * Use this function to assert any sort of bug in the user program. If the
414  * current trace is feasible (actually, a prefix of some feasible execution),
415  * then this execution will be aborted, printing the appropriate message. If
416  * the current trace is not yet feasible, the error message will be stashed and
417  * printed if the execution ever becomes feasible.
418  *
419  * @param msg Descriptive message for the bug (do not include newline char)
420  * @return True if bug is immediately-feasible
421  */
422 bool ModelChecker::assert_bug(const char *msg)
423 {
424         priv->bugs.push_back(new bug_message(msg));
425
426         if (isfeasibleprefix()) {
427                 set_assert();
428                 return true;
429         }
430         return false;
431 }
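/*
 * Usage sketch (not in the original source): internal checks call this
 * directly, e.g. the deadlock check in next_execution() below:
 *
 *   if (is_deadlocked())
 *           assert_bug("Deadlock detected");
 *
 * If the current trace is already a feasible prefix, the execution aborts
 * immediately; otherwise the message is stashed in priv->bugs and reported
 * only if the trace later becomes feasible.
 */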
432
433 /**
434  * @brief Assert a bug in the executing program, asserted by a user thread
435  * @see ModelChecker::assert_bug
436  * @param msg Descriptive message for the bug (do not include newline char)
437  */
438 void ModelChecker::assert_user_bug(const char *msg)
439 {
440         /* If feasible bug, bail out now */
441         if (assert_bug(msg))
442                 switch_to_master(NULL);
443 }
444
445 /** @return True, if any bugs have been reported for this execution */
446 bool ModelChecker::have_bug_reports() const
447 {
448         return priv->bugs.size() != 0;
449 }
450
451 /** @brief Print bug report listing for this execution (if any bugs exist) */
452 void ModelChecker::print_bugs() const
453 {
454         if (have_bug_reports()) {
455                 model_print("Bug report: %zu bug%s detected\n",
456                                 priv->bugs.size(),
457                                 priv->bugs.size() > 1 ? "s" : "");
458                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
459                         priv->bugs[i]->print();
460         }
461 }
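/*
 * Illustrative output (derived from the format strings above; not from an
 * actual run):
 *
 *   Bug report: 1 bug detected
 *     [BUG] Deadlock detected
 */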
462
463 /**
464  * @brief Record end-of-execution stats
465  *
466  * Must be run when exiting an execution. Records various stats.
467  * @see struct execution_stats
468  */
469 void ModelChecker::record_stats()
470 {
471         stats.num_total++;
472         if (!isfeasibleprefix())
473                 stats.num_infeasible++;
474         else if (have_bug_reports())
475                 stats.num_buggy_executions++;
476         else if (is_complete_execution())
477                 stats.num_complete++;
478         else {
479                 stats.num_redundant++;
480
481                 /**
482                  * @todo We can violate this ASSERT() when fairness/sleep sets
483                  * conflict to cause an execution to terminate, e.g. with:
484                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
485                  */
486                 //ASSERT(scheduler->all_threads_sleeping());
487         }
488 }
489
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
492 {
493         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494         model_print("Number of redundant executions: %d\n", stats.num_redundant);
495         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497         model_print("Total executions: %d\n", stats.num_total);
498         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
499 }
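/*
 * Illustrative output (format taken from above; the counts are invented):
 *
 *   Number of complete, bug-free executions: 12
 *   Number of redundant executions: 3
 *   Number of buggy executions: 0
 *   Number of infeasible executions: 5
 *   Total executions: 20
 *   Total nodes created: 148
 */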
500
501 /**
502  * @brief End-of-execution print
503  * @param printbugs Should any existing bugs be printed?
504  */
505 void ModelChecker::print_execution(bool printbugs) const
506 {
507         print_program_output();
508
509         if (params.verbose) {
510                 model_print("Earliest divergence point since last feasible execution:\n");
511                 if (earliest_diverge)
512                         earliest_diverge->print();
513                 else
514                         model_print("(Not set)\n");
515
516                 model_print("\n");
517                 print_stats();
518         }
519
520         /* Don't print invalid bugs */
521         if (printbugs)
522                 print_bugs();
523
524         model_print("\n");
525         print_summary();
526 }
527
528 /**
529  * Queries the model-checker for more executions to explore and, if one
530  * exists, resets the model-checker state to execute a new execution.
531  *
532  * @return If there are more executions to explore, return true. Otherwise,
533  * return false.
534  */
535 bool ModelChecker::next_execution()
536 {
537         DBG();
538         /* Is this execution a feasible execution that's worth bug-checking? */
539         bool complete = isfeasibleprefix() && (is_complete_execution() ||
540                         have_bug_reports());
541
542         /* End-of-execution bug checks */
543         if (complete) {
544                 if (is_deadlocked())
545                         assert_bug("Deadlock detected");
546
547                 checkDataRaces();
548         }
549
550         record_stats();
551
552         /* Output */
553         if (params.verbose || (complete && have_bug_reports()))
554                 print_execution(complete);
555         else
556                 clear_program_output();
557
558         if (complete)
559                 earliest_diverge = NULL;
560
561         if ((diverge = get_next_backtrack()) == NULL)
562                 return false;
563
564         if (DBG_ENABLED()) {
565                 model_print("Next execution will diverge at:\n");
566                 diverge->print();
567         }
568
569         reset_to_initial_state();
570         return true;
571 }
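/*
 * Sketch of the expected driver loop (assuming a CDSChecker-style main()
 * outside this file; not part of the original source):
 *
 *   do {
 *           // run one execution of the user program to completion
 *   } while (model->next_execution());
 *
 * Each iteration either replays up to the divergence point chosen by
 * get_next_backtrack() or, when that returns NULL, ends the exploration.
 */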
572
573 /**
574  * @brief Find the last fence-related backtracking conflict for a ModelAction
575  *
576  * This function performs the search for the most recent conflicting action
577  * against which we should perform backtracking, as affected by fence
578  * operations. This includes pairs of potentially-synchronizing actions which
579  * occur due to fence-acquire or fence-release, and hence should be explored in
580  * the opposite execution order.
581  *
582  * @param act The current action
583  * @return The most recent action which conflicts with act due to fences
584  */
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
586 {
587         /* Only perform release/acquire fence backtracking for stores */
588         if (!act->is_write())
589                 return NULL;
590
591         /* Find a fence-release (or, act is a release) */
592         ModelAction *last_release;
593         if (act->is_release())
594                 last_release = act;
595         else
596                 last_release = get_last_fence_release(act->get_tid());
597         if (!last_release)
598                 return NULL;
599
600         /* Skip past the release */
601         action_list_t *list = action_trace;
602         action_list_t::reverse_iterator rit;
603         for (rit = list->rbegin(); rit != list->rend(); rit++)
604                 if (*rit == last_release)
605                         break;
606         ASSERT(rit != list->rend());
607
608         /* Find a prior:
609          *   load-acquire
610          * or
611          *   load --sb-> fence-acquire */
612         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
613         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
614         bool found_acquire_fences = false;
615         for ( ; rit != list->rend(); rit++) {
616                 ModelAction *prev = *rit;
617                 if (act->same_thread(prev))
618                         continue;
619
620                 int tid = id_to_int(prev->get_tid());
621
622                 if (prev->is_read() && act->same_var(prev)) {
623                         if (prev->is_acquire()) {
624                                 /* Found most recent load-acquire, don't need
625                                  * to search for more fences */
626                                 if (!found_acquire_fences)
627                                         return NULL;
628                         } else {
629                                 prior_loads[tid] = prev;
630                         }
631                 }
632                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633                         found_acquire_fences = true;
634                         acquire_fences[tid] = prev;
635                 }
636         }
637
638         ModelAction *latest_backtrack = NULL;
639         for (unsigned int i = 0; i < acquire_fences.size(); i++)
640                 if (acquire_fences[i] && prior_loads[i])
641                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642                                 latest_backtrack = acquire_fences[i];
643         return latest_backtrack;
644 }
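/*
 * Illustrative litmus test (not in the original source) for the pattern this
 * function detects, where act is a store preceded by a fence-release and
 * another thread pairs a relaxed load of the same location with a later
 * fence-acquire:
 *
 *   T1: atomic_thread_fence(memory_order_release);
 *   T1: x.store(1, memory_order_relaxed);             // act
 *
 *   T2: r = x.load(memory_order_relaxed);             // prior_loads[T2]
 *   T2: atomic_thread_fence(memory_order_acquire);    // acquire_fences[T2]
 *
 * The latest such acquire fence is returned as a backtracking conflict so
 * both orders of the potentially fence-synchronizing pair get explored.
 */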
645
646 /**
647  * @brief Find the last backtracking conflict for a ModelAction
648  *
649  * This function performs the search for the most recent conflicting action
650  * against which we should perform backtracking. This primarily includes pairs of
651  * synchronizing actions which should be explored in the opposite execution
652  * order.
653  *
654  * @param act The current action
655  * @return The most recent action which conflicts with act
656  */
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
658 {
659         switch (act->get_type()) {
660         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
661         case ATOMIC_READ:
662         case ATOMIC_WRITE:
663         case ATOMIC_RMW: {
664                 ModelAction *ret = NULL;
665
666                 /* linear search: from most recent to oldest */
667                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668                 action_list_t::reverse_iterator rit;
669                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670                         ModelAction *prev = *rit;
671                         if (prev->could_synchronize_with(act)) {
672                                 ret = prev;
673                                 break;
674                         }
675                 }
676
677                 ModelAction *ret2 = get_last_fence_conflict(act);
678                 if (!ret2)
679                         return ret;
680                 if (!ret)
681                         return ret2;
682                 if (*ret < *ret2)
683                         return ret2;
684                 return ret;
685         }
686         case ATOMIC_LOCK:
687         case ATOMIC_TRYLOCK: {
688                 /* linear search: from most recent to oldest */
689                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690                 action_list_t::reverse_iterator rit;
691                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692                         ModelAction *prev = *rit;
693                         if (act->is_conflicting_lock(prev))
694                                 return prev;
695                 }
696                 break;
697         }
698         case ATOMIC_UNLOCK: {
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (!act->same_thread(prev) && prev->is_failed_trylock())
705                                 return prev;
706                 }
707                 break;
708         }
709         case ATOMIC_WAIT: {
710                 /* linear search: from most recent to oldest */
711                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712                 action_list_t::reverse_iterator rit;
713                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714                         ModelAction *prev = *rit;
715                         if (!act->same_thread(prev) && prev->is_failed_trylock())
716                                 return prev;
717                         if (!act->same_thread(prev) && prev->is_notify())
718                                 return prev;
719                 }
720                 break;
721         }
722
723         case ATOMIC_NOTIFY_ALL:
724         case ATOMIC_NOTIFY_ONE: {
725                 /* linear search: from most recent to oldest */
726                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727                 action_list_t::reverse_iterator rit;
728                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729                         ModelAction *prev = *rit;
730                         if (!act->same_thread(prev) && prev->is_wait())
731                                 return prev;
732                 }
733                 break;
734         }
735         default:
736                 break;
737         }
738         return NULL;
739 }
740
741 /** This method finds backtracking points against which we should try to
742  * reorder the parameter ModelAction.
743  *
744  * @param act The ModelAction to find backtracking points for.
745  */
746 void ModelChecker::set_backtracking(ModelAction *act)
747 {
748         Thread *t = get_thread(act);
749         ModelAction *prev = get_last_conflict(act);
750         if (prev == NULL)
751                 return;
752
753         Node *node = prev->get_node()->get_parent();
754
755         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
756         int low_tid, high_tid;
757         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
758                 low_tid = id_to_int(act->get_tid());
759                 high_tid = low_tid + 1;
760         } else {
761                 low_tid = 0;
762                 high_tid = get_num_threads();
763         }
764
765         for (int i = low_tid; i < high_tid; i++) {
766                 thread_id_t tid = int_to_id(i);
767
768                 /* Make sure this thread can be enabled here. */
769                 if (i >= node->get_num_threads())
770                         break;
771
772                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
773                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774                 if (node->enabled_status(tid) != THREAD_ENABLED)
775                         continue;
776
777                 /* Check if this has been explored already */
778                 if (node->has_been_explored(tid))
779                         continue;
780
781                 /* See if fairness allows */
782                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
783                         bool unfair = false;
784                         for (int t = 0; t < node->get_num_threads(); t++) {
785                                 thread_id_t tother = int_to_id(t);
786                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
787                                         unfair = true;
788                                         break;
789                                 }
790                         }
791                         if (unfair)
792                                 continue;
793                 }
794
795                 /* See if CHESS-like yield fairness allows */
796                 if (model->params.yieldon) {
797                         bool unfair = false;
798                         for (int t = 0; t < node->get_num_threads(); t++) {
799                                 thread_id_t tother = int_to_id(t);
800                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
801                                         unfair = true;
802                                         break;
803                                 }
804                         }
805                         if (unfair)
806                                 continue;
807                 }
808
809                 /* Cache the latest backtracking point */
810                 set_latest_backtrack(prev);
811
812                 /* If this is a new backtracking point, mark the tree */
813                 if (!node->set_backtrack(tid))
814                         continue;
815                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
816                                         id_to_int(prev->get_tid()),
817                                         id_to_int(t->get_id()));
818                 if (DBG_ENABLED()) {
819                         prev->print();
820                         act->print();
821                 }
822         }
823 }
824
825 /**
826  * @brief Cache a backtracking point as the "most recent", if eligible
827  *
828  * Note that this does not prepare the NodeStack for this backtracking
829  * operation; it only caches the action on a per-execution basis
830  *
831  * @param act The operation at which we should explore a different next action
832  * (i.e., backtracking point)
833  * @return True, if this action is now the most recent backtracking point;
834  * false otherwise
835  */
836 bool ModelChecker::set_latest_backtrack(ModelAction *act)
837 {
838         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
839                 priv->next_backtrack = act;
840                 return true;
841         }
842         return false;
843 }
844
845 /**
846  * Returns the last backtracking point. The model checker will explore a different
847  * path from this point in the next execution.
848  * @return The ModelAction at which the next execution should diverge.
849  */
850 ModelAction * ModelChecker::get_next_backtrack()
851 {
852         ModelAction *next = priv->next_backtrack;
853         priv->next_backtrack = NULL;
854         return next;
855 }
856
857 /**
858  * Processes a read model action.
859  * @param curr is the read model action to process.
860  * @return True if processing this read updates the mo_graph.
861  */
862 bool ModelChecker::process_read(ModelAction *curr)
863 {
864         Node *node = curr->get_node();
865         while (true) {
866                 bool updated = false;
867                 switch (node->get_read_from_status()) {
868                 case READ_FROM_PAST: {
869                         const ModelAction *rf = node->get_read_from_past();
870                         ASSERT(rf);
871
872                         mo_graph->startChanges();
873
874                         ASSERT(!is_infeasible());
875                         if (!check_recency(curr, rf)) {
876                                 if (node->increment_read_from()) {
877                                         mo_graph->rollbackChanges();
878                                         continue;
879                                 } else {
880                                         priv->too_many_reads = true;
881                                 }
882                         }
883
884                         updated = r_modification_order(curr, rf);
885                         read_from(curr, rf);
886                         mo_graph->commitChanges();
887                         mo_check_promises(curr, true);
888                         break;
889                 }
890                 case READ_FROM_PROMISE: {
891                         Promise *promise = curr->get_node()->get_read_from_promise();
892                         if (promise->add_reader(curr))
893                                 priv->failed_promise = true;
894                         curr->set_read_from_promise(promise);
895                         mo_graph->startChanges();
896                         if (!check_recency(curr, promise))
897                                 priv->too_many_reads = true;
898                         updated = r_modification_order(curr, promise);
899                         mo_graph->commitChanges();
900                         break;
901                 }
902                 case READ_FROM_FUTURE: {
903                         /* Read from future value */
904                         struct future_value fv = node->get_future_value();
905                         Promise *promise = new Promise(curr, fv);
906                         curr->set_read_from_promise(promise);
907                         promises->push_back(promise);
908                         mo_graph->startChanges();
909                         updated = r_modification_order(curr, promise);
910                         mo_graph->commitChanges();
911                         break;
912                 }
913                 default:
914                         ASSERT(false);
915                 }
916                 get_thread(curr)->set_return_value(curr->get_return_value());
917                 return updated;
918         }
919 }
920
921 /**
922  * Processes a lock, trylock, unlock, wait, or notify model action.
923  * @param curr The mutex-related model action to process.
924  *
925  * The trylock operation checks whether the lock is already taken. If not,
926  * it falls through to the normal lock case. If it is taken, the trylock
927  * fails.
928  *
929  * The lock operation has already been checked that it is enabled, so
930  * it just grabs the lock and synchronizes with the previous unlock.
931  *
932  * The unlock operation has to re-enable all of the threads that are
933  * waiting on the lock.
934  *
935  * @return True if synchronization was updated; false otherwise
936  */
937 bool ModelChecker::process_mutex(ModelAction *curr)
938 {
939         std::mutex *mutex = curr->get_mutex();
940         struct std::mutex_state *state = NULL;
941
942         if (mutex)
943                 state = mutex->get_state();
944
945         switch (curr->get_type()) {
946         case ATOMIC_TRYLOCK: {
947                 bool success = !state->locked;
948                 curr->set_try_lock(success);
949                 if (!success) {
950                         get_thread(curr)->set_return_value(0);
951                         break;
952                 }
953                 get_thread(curr)->set_return_value(1);
954         }
955                 //otherwise fall through to the lock case
956         case ATOMIC_LOCK: {
957                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
958                         assert_bug("Lock access before initialization");
959                 state->locked = get_thread(curr);
960                 ModelAction *unlock = get_last_unlock(curr);
961                 //synchronize with the previous unlock statement
962                 if (unlock != NULL) {
963                         curr->synchronize_with(unlock);
964                         return true;
965                 }
966                 break;
967         }
968         case ATOMIC_WAIT:
969         case ATOMIC_UNLOCK: {
970                 /* wake up the other threads */
971                 for (unsigned int i = 0; i < get_num_threads(); i++) {
972                         Thread *t = get_thread(int_to_id(i));
973                         Thread *curr_thrd = get_thread(curr);
974                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
975                                 scheduler->wake(t);
976                 }
977
978                 /* unlock the lock - after checking who was waiting on it */
979                 state->locked = NULL;
980
981                 if (!curr->is_wait())
982                         break; /* The rest is only for ATOMIC_WAIT */
983
984                 /* Should we go to sleep? (simulate spurious failures) */
985                 if (curr->get_node()->get_misc() == 0) {
986                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
987                         /* disable us */
988                         scheduler->sleep(get_thread(curr));
989                 }
990                 break;
991         }
992         case ATOMIC_NOTIFY_ALL: {
993                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
994                 //activate all the waiting threads
995                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
996                         scheduler->wake(get_thread(*rit));
997                 }
998                 waiters->clear();
999                 break;
1000         }
1001         case ATOMIC_NOTIFY_ONE: {
1002                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1003                 int wakeupthread = curr->get_node()->get_misc();
1004                 action_list_t::iterator it = waiters->begin();
1005                 advance(it, wakeupthread);
1006                 scheduler->wake(get_thread(*it));
1007                 waiters->erase(it);
1008                 break;
1009         }
1010
1011         default:
1012                 ASSERT(0);
1013         }
1014         return false;
1015 }
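/*
 * Hypothetical user-level view (not in the original source), assuming the
 * model's std::mutex wrapper:
 *
 *   std::mutex m;
 *   if (m.try_lock()) {     // ATOMIC_TRYLOCK; on success falls through to the lock case
 *           ...
 *           m.unlock();     // ATOMIC_UNLOCK; wakes threads blocked on this lock
 *   }
 *
 * The 0/1 set via set_return_value() above becomes try_lock()'s return value.
 */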
1016
1017 /**
1018  * @brief Check if the current pending promises allow a future value to be sent
1019  *
1020  * If one of the following is true:
1021  *  (a) there are no pending promises
1022  *  (b) the reader and writer do not cross any promises
1023  * Then, it is safe to pass a future value back now.
1024  *
1025  * Otherwise, we must save the pending future value until (a) or (b) is true
1026  *
1027  * @param writer The operation which sends the future value. Must be a write.
1028  * @param reader The operation which will observe the value. Must be a read.
1029  * @return True if the future value can be sent now; false if it must wait.
1030  */
1031 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1032                 const ModelAction *reader) const
1033 {
1034         if (promises->empty())
1035                 return true;
1036         for (int i = promises->size() - 1; i >= 0; i--) {
1037                 ModelAction *pr = (*promises)[i]->get_reader(0);
1038                 //reader is after promise...doesn't cross any promise
1039                 if (*reader > *pr)
1040                         return true;
1041                 //writer is after promise, reader before...bad...
1042                 if (*writer > *pr)
1043                         return false;
1044         }
1045         return true;
1046 }
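/*
 * Worked example (not in the original source), using sequence numbers, with a
 * single outstanding promise whose first reader P is at seq 10:
 *
 *   writer @ 20, reader @ 30:  reader is after P          -> safe to send now
 *   writer @ 20, reader @ 5:   writer after P, reader not -> crosses P, must wait
 *   writer @ 3,  reader @ 5:   neither is after P         -> loop ends, safe
 */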
1047
1048 /**
1049  * @brief Add a future value to a reader
1050  *
1051  * This function performs a few additional checks to ensure that the future
1052  * value can be feasibly observed by the reader
1053  *
1054  * @param writer The operation whose value is sent. Must be a write.
1055  * @param reader The read operation which may read the future value. Must be a read.
1056  */
1057 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1058 {
1059         /* Do more ambitious checks now that mo is more complete */
1060         if (!mo_may_allow(writer, reader))
1061                 return;
1062
1063         Node *node = reader->get_node();
1064
1065         /* Find an ancestor thread which exists at the time of the reader */
1066         Thread *write_thread = get_thread(writer);
1067         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1068                 write_thread = write_thread->get_parent();
1069
1070         struct future_value fv = {
1071                 writer->get_write_value(),
1072                 writer->get_seq_number() + params.maxfuturedelay,
1073                 write_thread->get_id(),
1074         };
1075         if (node->add_future_value(fv))
1076                 set_latest_backtrack(reader);
1077 }
1078
1079 /**
1080  * Process a write ModelAction
1081  * @param curr The ModelAction to process
1082  * @return True if the mo_graph was updated or promises were resolved
1083  */
1084 bool ModelChecker::process_write(ModelAction *curr)
1085 {
1086         /* Readers to which we may send our future value */
1087         ModelVector<ModelAction *> send_fv;
1088
1089         const ModelAction *earliest_promise_reader;
1090         bool updated_promises = false;
1091
1092         bool updated_mod_order = w_modification_order(curr, &send_fv);
1093         Promise *promise = pop_promise_to_resolve(curr);
1094
1095         if (promise) {
1096                 earliest_promise_reader = promise->get_reader(0);
1097                 updated_promises = resolve_promise(curr, promise);
1098         } else
1099                 earliest_promise_reader = NULL;
1100
1101         for (unsigned int i = 0; i < send_fv.size(); i++) {
1102                 ModelAction *read = send_fv[i];
1103
1104                 /* Don't send future values to reads after the Promise we resolve */
1105                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1106                         /* Check if future value can be sent immediately */
1107                         if (promises_may_allow(curr, read)) {
1108                                 add_future_value(curr, read);
1109                         } else {
1110                                 futurevalues->push_back(PendingFutureValue(curr, read));
1111                         }
1112                 }
1113         }
1114
1115         /* Check the pending future values */
1116         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1117                 struct PendingFutureValue pfv = (*futurevalues)[i];
1118                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1119                         add_future_value(pfv.writer, pfv.reader);
1120                         futurevalues->erase(futurevalues->begin() + i);
1121                 }
1122         }
1123
1124         mo_graph->commitChanges();
1125         mo_check_promises(curr, false);
1126
1127         get_thread(curr)->set_return_value(VALUE_NONE);
1128         return updated_mod_order || updated_promises;
1129 }
1130
1131 /**
1132  * Process a fence ModelAction
1133  * @param curr The ModelAction to process
1134  * @return True if synchronization was updated
1135  */
1136 bool ModelChecker::process_fence(ModelAction *curr)
1137 {
1138         /*
1139          * fence-relaxed: no-op
1140          * fence-release: only log the occurrence (not in this function), for
1141          *   use in later synchronization
1142          * fence-acquire (this function): search for hypothetical release
1143          *   sequences
1144          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1145          */
1146         bool updated = false;
1147         if (curr->is_acquire()) {
1148                 action_list_t *list = action_trace;
1149                 action_list_t::reverse_iterator rit;
1150                 /* Find X : is_read(X) && X --sb-> curr */
1151                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1152                         ModelAction *act = *rit;
1153                         if (act == curr)
1154                                 continue;
1155                         if (act->get_tid() != curr->get_tid())
1156                                 continue;
1157                         /* Stop at the beginning of the thread */
1158                         if (act->is_thread_start())
1159                                 break;
1160                         /* Stop once we reach a prior fence-acquire */
1161                         if (act->is_fence() && act->is_acquire())
1162                                 break;
1163                         if (!act->is_read())
1164                                 continue;
1165                         /* read-acquire will find its own release sequences */
1166                         if (act->is_acquire())
1167                                 continue;
1168
1169                         /* Establish hypothetical release sequences */
1170                         rel_heads_list_t release_heads;
1171                         get_release_seq_heads(curr, act, &release_heads);
1172                         for (unsigned int i = 0; i < release_heads.size(); i++)
1173                                 if (!curr->synchronize_with(release_heads[i]))
1174                                         set_bad_synchronization();
1175                         if (release_heads.size() != 0)
1176                                 updated = true;
1177                 }
1178         }
1179         return updated;
1180 }
1181
1182 /**
1183  * @brief Process the current action for thread-related activity
1184  *
1185  * Performs current-action processing for a THREAD_* ModelAction. Processing
1186  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1187  * synchronization, etc.  This function is a no-op for non-THREAD actions
1188  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1189  *
1190  * @param curr The current action
1191  * @return True if synchronization was updated or a thread completed
1192  */
1193 bool ModelChecker::process_thread_action(ModelAction *curr)
1194 {
1195         bool updated = false;
1196
1197         switch (curr->get_type()) {
1198         case THREAD_CREATE: {
1199                 thrd_t *thrd = (thrd_t *)curr->get_location();
1200                 struct thread_params *params = (struct thread_params *)curr->get_value();
1201                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1202                 add_thread(th);
1203                 th->set_creation(curr);
1204                 /* Promises can be satisfied by children */
1205                 for (unsigned int i = 0; i < promises->size(); i++) {
1206                         Promise *promise = (*promises)[i];
1207                         if (promise->thread_is_available(curr->get_tid()))
1208                                 promise->add_thread(th->get_id());
1209                 }
1210                 break;
1211         }
1212         case THREAD_JOIN: {
1213                 Thread *blocking = curr->get_thread_operand();
1214                 ModelAction *act = get_last_action(blocking->get_id());
1215                 curr->synchronize_with(act);
1216                 updated = true; /* trigger rel-seq checks */
1217                 break;
1218         }
1219         case THREAD_FINISH: {
1220                 Thread *th = get_thread(curr);
1221                 /* Wake up any joining threads */
1222                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1223                         Thread *waiting = get_thread(int_to_id(i));
1224                         if (waiting->waiting_on() == th &&
1225                                         waiting->get_pending()->is_thread_join())
1226                                 scheduler->wake(waiting);
1227                 }
1228                 th->complete();
1229                 /* Completed thread can't satisfy promises */
1230                 for (unsigned int i = 0; i < promises->size(); i++) {
1231                         Promise *promise = (*promises)[i];
1232                         if (promise->thread_is_available(th->get_id()))
1233                                 if (promise->eliminate_thread(th->get_id()))
1234                                         priv->failed_promise = true;
1235                 }
1236                 updated = true; /* trigger rel-seq checks */
1237                 break;
1238         }
1239         case THREAD_START: {
1240                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1241                 break;
1242         }
1243         default:
1244                 break;
1245         }
1246
1247         return updated;
1248 }
1249
1250 /**
1251  * @brief Process the current action for release sequence fixup activity
1252  *
1253  * Performs model-checker release sequence fixups for the current action,
1254  * forcing a single pending release sequence to break (with a given, potential
1255  * "loose" write) or to complete (i.e., synchronize). If a pending release
1256  * sequence forms a complete release sequence, then we must perform the fixup
1257  * synchronization, mo_graph additions, etc.
1258  *
1259  * @param curr The current action; must be a release sequence fixup action
1260  * @param work_queue The work queue to which to add work items as they are
1261  * generated
1262  */
1263 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1264 {
1265         const ModelAction *write = curr->get_node()->get_relseq_break();
1266         struct release_seq *sequence = pending_rel_seqs->back();
1267         pending_rel_seqs->pop_back();
1268         ASSERT(sequence);
1269         ModelAction *acquire = sequence->acquire;
1270         const ModelAction *rf = sequence->rf;
1271         const ModelAction *release = sequence->release;
1272         ASSERT(acquire);
1273         ASSERT(release);
1274         ASSERT(rf);
1275         ASSERT(release->same_thread(rf));
1276
1277         if (write == NULL) {
1278                 /**
1279                  * @todo Forcing a synchronization requires that we set
1280                  * modification order constraints. For instance, we can't allow
1281                  * a fixup sequence in which two separate read-acquire
1282                  * operations read from the same sequence, where the first one
1283                  * synchronizes and the other doesn't. Essentially, we can't
1284                  * allow any writes to insert themselves between 'release' and
1285                  * 'rf'
1286                  */
1287
1288                 /* Must synchronize */
1289                 if (!acquire->synchronize_with(release)) {
1290                         set_bad_synchronization();
1291                         return;
1292                 }
1293                 /* Re-check all pending release sequences */
1294                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1295                 /* Re-check act for mo_graph edges */
1296                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1297
1298                 /* propagate synchronization to later actions */
1299                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1300                 for (; (*rit) != acquire; rit++) {
1301                         ModelAction *propagate = *rit;
1302                         if (acquire->happens_before(propagate)) {
1303                                 propagate->synchronize_with(acquire);
1304                                 /* Re-check 'propagate' for mo_graph edges */
1305                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1306                         }
1307                 }
1308         } else {
1309                 /* Break release sequence with new edges:
1310                  *   release --mo--> write --mo--> rf */
1311                 mo_graph->addEdge(release, write);
1312                 mo_graph->addEdge(write, rf);
1313         }
1314
1315         /* See if we have realized a data race */
1316         checkDataRaces();
1317 }
1318
1319 /**
1320  * Initialize the current action by performing one or more of the following
1321  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1322  * in the NodeStack, manipulating backtracking sets, allocating and
1323  * initializing clock vectors, and computing the promises to fulfill.
1324  *
1325  * @param curr The current action, as passed from the user context; may be
1326  * freed/invalidated after the execution of this function, with a different
1327  * action "returned" in its place (pass-by-reference)
1328  * @return True if curr is a newly-explored action; false otherwise
1329  */
1330 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1331 {
1332         ModelAction *newcurr;
1333
1334         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1335                 newcurr = process_rmw(*curr);
1336                 delete *curr;
1337
1338                 if (newcurr->is_rmw())
1339                         compute_promises(newcurr);
1340
1341                 *curr = newcurr;
1342                 return false;
1343         }
1344
1345         (*curr)->set_seq_number(get_next_seq_num());
1346
1347         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1348         if (newcurr) {
1349                 /* First restore type and order in case of RMW operation */
1350                 if ((*curr)->is_rmwr())
1351                         newcurr->copy_typeandorder(*curr);
1352
1353                 ASSERT((*curr)->get_location() == newcurr->get_location());
1354                 newcurr->copy_from_new(*curr);
1355
1356                 /* Discard duplicate ModelAction; use action from NodeStack */
1357                 delete *curr;
1358
1359                 /* Always compute new clock vector */
1360                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1361
1362                 *curr = newcurr;
1363                 return false; /* Action was explored previously */
1364         } else {
1365                 newcurr = *curr;
1366
1367                 /* Always compute new clock vector */
1368                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1369
1370                 /* Assign most recent release fence */
1371                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1372
1373                 /*
1374                  * Perform one-time actions when pushing new ModelAction onto
1375                  * NodeStack
1376                  */
1377                 if (newcurr->is_write())
1378                         compute_promises(newcurr);
1379                 else if (newcurr->is_relseq_fixup())
1380                         compute_relseq_breakwrites(newcurr);
1381                 else if (newcurr->is_wait())
1382                         newcurr->get_node()->set_misc_max(2);
1383                 else if (newcurr->is_notify_one()) {
1384                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1385                 }
1386                 return true; /* This was a new ModelAction */
1387         }
1388 }
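
/*
 * Illustrative sketch (an assumption about the program under test, not a
 * statement about checker internals): a source-level atomic read-modify-write
 * reaches the checker split into a read part (RMWR) that is later merged with
 * its write part (RMW) -- or with RMWC, which presumably corresponds to a
 * compare_exchange whose comparison fails -- by process_rmw() below.
 *
 *   std::atomic<int> counter(0);
 *   counter.fetch_add(1, std::memory_order_acq_rel);
 *   // modeled as: read the old value (RMWR), then write old + 1 (RMW)
 *   int expected = 5;
 *   counter.compare_exchange_strong(expected, 6);
 *   // if the comparison fails, the write part never materializes
 */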
1389
1390 /**
1391  * @brief Establish reads-from relation between two actions
1392  *
1393  * Perform basic operations involved with establishing a concrete rf relation,
1394  * including setting the ModelAction data and checking for release sequences.
1395  *
1396  * @param act The action that is reading (must be a read)
1397  * @param rf The action from which we are reading (must be a write)
1398  *
1399  * @return True if this read established synchronization
1400  */
1401 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1402 {
1403         ASSERT(rf);
1404         ASSERT(rf->is_write());
1405
1406         act->set_read_from(rf);
1407         if (act->is_acquire()) {
1408                 rel_heads_list_t release_heads;
1409                 get_release_seq_heads(act, act, &release_heads);
1410                 int num_heads = release_heads.size();
1411                 for (unsigned int i = 0; i < release_heads.size(); i++)
1412                         if (!act->synchronize_with(release_heads[i])) {
1413                                 set_bad_synchronization();
1414                                 num_heads--;
1415                         }
1416                 return num_heads > 0;
1417         }
1418         return false;
1419 }
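
/*
 * For reference, a minimal sketch of client code whose synchronization
 * read_from() would establish (illustrative only; assumes a standard C++11
 * release/acquire pair in the program under test):
 *
 *   #include <atomic>
 *   #include <cassert>
 *
 *   std::atomic<int> data(0), flag(0);
 *
 *   void writer() {
 *           data.store(42, std::memory_order_relaxed);
 *           flag.store(1, std::memory_order_release);       // release head
 *   }
 *   void reader() {
 *           if (flag.load(std::memory_order_acquire) == 1)  // acquire read
 *                   assert(data.load(std::memory_order_relaxed) == 42); // must hold
 *   }
 *
 * When the acquire load reads from the release store, the two synchronize
 * (the release store is its own release-sequence head), making the earlier
 * relaxed store to 'data' visible.
 */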
1420
1421 /**
1422  * Check promises and eliminate potentially-satisfying threads when a thread is
1423  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1424  * no longer satisfy a promise generated from that thread.
1425  *
1426  * @param blocker The thread on which a thread is waiting
1427  * @param waiting The waiting thread
1428  */
1429 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1430 {
1431         for (unsigned int i = 0; i < promises->size(); i++) {
1432                 Promise *promise = (*promises)[i];
1433                 if (!promise->thread_is_available(waiting->get_id()))
1434                         continue;
1435                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1436                         ModelAction *reader = promise->get_reader(j);
1437                         if (reader->get_tid() != blocker->get_id())
1438                                 continue;
1439                         if (promise->eliminate_thread(waiting->get_id())) {
1440                                 /* Promise has failed */
1441                                 priv->failed_promise = true;
1442                         } else {
1443                                 /* Only eliminate the 'waiting' thread once */
1444                                 return;
1445                         }
1446                 }
1447         }
1448 }
1449
1450 /**
1451  * @brief Check whether a model action is enabled.
1452  *
1453  * Checks whether a lock or join operation would be successful (i.e., is the
1454  * lock already locked, or is the joined thread already complete). If not, put
1455  * the action in a waiter list.
1456  *
1457  * @param curr The ModelAction to check for enablement
1458  * @return True if the action is enabled; false otherwise
1459  */
1460 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1461         if (curr->is_lock()) {
1462                 std::mutex *lock = curr->get_mutex();
1463                 struct std::mutex_state *state = lock->get_state();
1464                 if (state->locked)
1465                         return false;
1466         } else if (curr->is_thread_join()) {
1467                 Thread *blocking = curr->get_thread_operand();
1468                 if (!blocking->is_complete()) {
1469                         thread_blocking_check_promises(blocking, get_thread(curr));
1470                         return false;
1471                 }
1472         }
1473
1474         return true;
1475 }
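
/*
 * Sketch of client code that would be found "not enabled" here (an
 * illustration, assuming the checker's own mutex/thread wrappers are in use):
 *
 *   std::mutex m;
 *
 *   void thread_a() {
 *           m.lock();            // succeeds; mutex is now held
 *           // ... critical section ...
 *   }
 *   void thread_b() {
 *           m.lock();            // not enabled while thread_a holds 'm';
 *                                // the caller puts this thread to sleep
 *   }
 *
 * The same applies to joining a thread that has not yet completed.
 */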
1476
1477 /**
1478  * This is the heart of the model checker routine. It performs model-checking
1479  * actions corresponding to a given "current action." Among other processes, it
1480  * calculates reads-from relationships, updates synchronization clock vectors,
1481  * forms a memory_order constraints graph, and handles replay/backtrack
1482  * execution when running permutations of previously-observed executions.
1483  *
1484  * @param curr The current action to process
1485  * @return The ModelAction that is actually executed; may be different than
1486  * curr; may be NULL, if the current action is not enabled to run
1487  */
1488 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1489 {
1490         ASSERT(curr);
1491         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1492
1493         if (!check_action_enabled(curr)) {
1494                 /* Make the execution look like we chose to run this action
1495                  * much later, when a lock/join can succeed */
1496                 get_thread(curr)->set_pending(curr);
1497                 scheduler->sleep(get_thread(curr));
1498                 return NULL;
1499         }
1500
1501         bool newly_explored = initialize_curr_action(&curr);
1502
1503         DBG();
1504         if (DBG_ENABLED())
1505                 curr->print();
1506
1507         wake_up_sleeping_actions(curr);
1508
1509         /* Compute fairness information for CHESS yield algorithm */
1510         if (model->params.yieldon) {
1511                 curr->get_node()->update_yield(scheduler);
1512         }
1513
1514         /* Add the action to lists before any other model-checking tasks */
1515         if (!second_part_of_rmw)
1516                 add_action_to_lists(curr);
1517
1518         /* Build may_read_from set for newly-created actions */
1519         if (newly_explored && curr->is_read())
1520                 build_may_read_from(curr);
1521
1522         /* Initialize work_queue with the "current action" work */
1523         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1524         while (!work_queue.empty() && !has_asserted()) {
1525                 WorkQueueEntry work = work_queue.front();
1526                 work_queue.pop_front();
1527
1528                 switch (work.type) {
1529                 case WORK_CHECK_CURR_ACTION: {
1530                         ModelAction *act = work.action;
1531                         bool update = false; /* update this location's release seq's */
1532                         bool update_all = false; /* update all release seq's */
1533
1534                         if (process_thread_action(curr))
1535                                 update_all = true;
1536
1537                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1538                                 update = true;
1539
1540                         if (act->is_write() && process_write(act))
1541                                 update = true;
1542
1543                         if (act->is_fence() && process_fence(act))
1544                                 update_all = true;
1545
1546                         if (act->is_mutex_op() && process_mutex(act))
1547                                 update_all = true;
1548
1549                         if (act->is_relseq_fixup())
1550                                 process_relseq_fixup(curr, &work_queue);
1551
1552                         if (update_all)
1553                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1554                         else if (update)
1555                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1556                         break;
1557                 }
1558                 case WORK_CHECK_RELEASE_SEQ:
1559                         resolve_release_sequences(work.location, &work_queue);
1560                         break;
1561                 case WORK_CHECK_MO_EDGES: {
1562                         /** @todo Complete verification of work_queue */
1563                         ModelAction *act = work.action;
1564                         bool updated = false;
1565
1566                         if (act->is_read()) {
1567                                 const ModelAction *rf = act->get_reads_from();
1568                                 const Promise *promise = act->get_reads_from_promise();
1569                                 if (rf) {
1570                                         if (r_modification_order(act, rf))
1571                                                 updated = true;
1572                                 } else if (promise) {
1573                                         if (r_modification_order(act, promise))
1574                                                 updated = true;
1575                                 }
1576                         }
1577                         if (act->is_write()) {
1578                                 if (w_modification_order(act, NULL))
1579                                         updated = true;
1580                         }
1581                         mo_graph->commitChanges();
1582
1583                         if (updated)
1584                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1585                         break;
1586                 }
1587                 default:
1588                         ASSERT(false);
1589                         break;
1590                 }
1591         }
1592
1593         check_curr_backtracking(curr);
1594         set_backtracking(curr);
1595         return curr;
1596 }
1597
1598 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1599 {
1600         Node *currnode = curr->get_node();
1601         Node *parnode = currnode->get_parent();
1602
1603         if ((parnode && !parnode->backtrack_empty()) ||
1604                          !currnode->misc_empty() ||
1605                          !currnode->read_from_empty() ||
1606                          !currnode->promise_empty() ||
1607                          !currnode->relseq_break_empty()) {
1608                 set_latest_backtrack(curr);
1609         }
1610 }
1611
1612 bool ModelChecker::promises_expired() const
1613 {
1614         for (unsigned int i = 0; i < promises->size(); i++) {
1615                 Promise *promise = (*promises)[i];
1616                 if (promise->get_expiration() < priv->used_sequence_numbers)
1617                         return true;
1618         }
1619         return false;
1620 }
1621
1622 /**
1623  * This is the strongest feasibility check available.
1624  * @return whether the current trace (partial or complete) must be a prefix of
1625  * a feasible trace.
1626  */
1627 bool ModelChecker::isfeasibleprefix() const
1628 {
1629         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1630 }
1631
1632 /**
1633  * Print diagnostic information about an infeasible execution
1634  * @param prefix A string to prefix the output with; if NULL, then a default
1635  * message prefix will be provided
1636  */
1637 void ModelChecker::print_infeasibility(const char *prefix) const
1638 {
1639         char buf[200]; /* large enough for all infeasibility tags at once */
1640         char *ptr = buf;
1641         if (mo_graph->checkForCycles())
1642                 ptr += sprintf(ptr, "[mo cycle]");
1643         if (priv->failed_promise)
1644                 ptr += sprintf(ptr, "[failed promise]");
1645         if (priv->too_many_reads)
1646                 ptr += sprintf(ptr, "[too many reads]");
1647         if (priv->no_valid_reads)
1648                 ptr += sprintf(ptr, "[no valid reads-from]");
1649         if (priv->bad_synchronization)
1650                 ptr += sprintf(ptr, "[bad sw ordering]");
1651         if (promises_expired())
1652                 ptr += sprintf(ptr, "[promise expired]");
1653         if (promises->size() != 0)
1654                 ptr += sprintf(ptr, "[unresolved promise]");
1655         if (ptr != buf)
1656                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1657 }
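
/*
 * Example output (derived from the format strings above): an execution with a
 * modification-order cycle that also left a promise unresolved would print
 *
 *   Infeasible: [mo cycle][unresolved promise]
 *
 * using the default prefix when 'prefix' is NULL.
 */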
1658
1659 /**
1660  * Returns whether the current completed trace is feasible, except for pending
1661  * release sequences.
1662  */
1663 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1664 {
1665         return !is_infeasible() && promises->size() == 0;
1666 }
1667
1668 /**
1669  * Check if the current partial trace is infeasible. Does not check any
1670  * end-of-execution flags, which might rule out the execution. Thus, this is
1671  * useful only for ruling an execution as infeasible.
1672  * @return whether the current partial trace is infeasible.
1673  */
1674 bool ModelChecker::is_infeasible() const
1675 {
1676         return mo_graph->checkForCycles() ||
1677                 priv->no_valid_reads ||
1678                 priv->failed_promise ||
1679                 priv->too_many_reads ||
1680                 priv->bad_synchronization ||
1681                 promises_expired();
1682 }
1683
1684 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1685 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1686         ModelAction *lastread = get_last_action(act->get_tid());
1687         lastread->process_rmw(act);
1688         if (act->is_rmw()) {
1689                 if (lastread->get_reads_from())
1690                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1691                 else
1692                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1693                 mo_graph->commitChanges();
1694         }
1695         return lastread;
1696 }
1697
1698 /**
1699  * A helper function for ModelChecker::check_recency, to check if the current
1700  * thread could have read from a different write/promise for the last
1701  * 'params.maxreads' steps and if that write/promise should become visible (i.e., is
1702  * ordered later in the modification order). This helps model memory liveness.
1703  *
1704  * @param curr The current action. Must be a read.
1705  * @param rf The write/promise from which we plan to read
1706  * @param other_rf The write/promise from which we may read
1707  * @return True if we were able to read from other_rf for params.maxreads steps
1708  */
1709 template <typename T, typename U>
1710 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1711 {
1712         /* Need a different write/promise */
1713         if (other_rf->equals(rf))
1714                 return false;
1715
1716         /* Only look for "newer" writes/promises */
1717         if (!mo_graph->checkReachable(rf, other_rf))
1718                 return false;
1719
1720         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1721         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1722         action_list_t::reverse_iterator rit = list->rbegin();
1723         ASSERT((*rit) == curr);
1724         /* Skip past curr */
1725         rit++;
1726
1727         /* Does this write/promise work for everyone? */
1728         for (int i = 0; i < params.maxreads; i++, rit++) {
1729                 ModelAction *act = *rit;
1730                 if (!act->may_read_from(other_rf))
1731                         return false;
1732         }
1733         return true;
1734 }
1735
1736 /**
1737  * Checks whether a thread has read from the same write or Promise too many
1738  * times without seeing the effects of a later write/Promise.
1739  *
1740  * Basic idea:
1741  * 1) there must be a different write/promise that we could read from,
1742  * 2) we must have read from the same write/promise in excess of maxreads times,
1743  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1744  * 4) that other write/promise must be mod-ordered after the write/promise we are reading from.
1745  *
1746  * If so, we decide that the execution is no longer feasible.
1747  *
1748  * @param curr The current action. Must be a read.
1749  * @param rf The ModelAction/Promise from which we might read.
1750  * @return True if the read should succeed; false otherwise
1751  */
1752 template <typename T>
1753 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1754 {
1755         if (!params.maxreads)
1756                 return true;
1757
1758         //NOTE: Next check is just optimization, not really necessary....
1759         if (curr->get_node()->get_read_from_past_size() +
1760                         curr->get_node()->get_read_from_promise_size() <= 1)
1761                 return true;
1762
1763         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1764         int tid = id_to_int(curr->get_tid());
1765         ASSERT(tid < (int)thrd_lists->size());
1766         action_list_t *list = &(*thrd_lists)[tid];
1767         action_list_t::reverse_iterator rit = list->rbegin();
1768         ASSERT((*rit) == curr);
1769         /* Skip past curr */
1770         rit++;
1771
1772         action_list_t::reverse_iterator ritcopy = rit;
1773         /* See if we have enough reads from the same value */
1774         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1775                 if (ritcopy == list->rend())
1776                         return true;
1777                 ModelAction *act = *ritcopy;
1778                 if (!act->is_read())
1779                         return true;
1780                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1781                         return true;
1782                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1783                         return true;
1784                 if (act->get_node()->get_read_from_past_size() +
1785                                 act->get_node()->get_read_from_promise_size() <= 1)
1786                         return true;
1787         }
1788         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1789                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1790                 if (should_read_instead(curr, rf, write))
1791                         return false; /* liveness failure */
1792         }
1793         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1794                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1795                 if (should_read_instead(curr, rf, promise))
1796                         return false; /* liveness failure */
1797         }
1798         return true;
1799 }
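
/*
 * Sketch of the liveness pattern this bounds (illustrative client code): a
 * relaxed spin loop should not observe the stale initial value forever once a
 * modification-ordered-later store exists. After params.maxreads reads of the
 * same write while such an alternative is available, the execution is treated
 * as infeasible.
 *
 *   std::atomic<bool> done(false);
 *
 *   void worker() {
 *           // ... do work ...
 *           done.store(true, std::memory_order_relaxed);
 *   }
 *   void waiter() {
 *           while (!done.load(std::memory_order_relaxed))
 *                   ;    // without a recency bound, executions that read
 *                        // 'false' forever would be explored indefinitely
 *   }
 */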
1800
1801 /**
1802  * Updates the mo_graph with the constraints imposed from the current
1803  * read.
1804  *
1805  * Basic idea is the following: Go through each other thread and find
1806  * the last action that happened before our read.  Two cases:
1807  *
1808  * (1) The action is a write => that write must either occur before
1809  * the write we read from or be the write we read from.
1810  *
1811  * (2) The action is a read => the write that that action read from
1812  * must occur before the write we read from or be the same write.
1813  *
1814  * @param curr The current action. Must be a read.
1815  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1816  * @return True if modification order edges were added; false otherwise
1817  */
1818 template <typename rf_type>
1819 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1820 {
1821         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1822         unsigned int i;
1823         bool added = false;
1824         ASSERT(curr->is_read());
1825
1826         /* Last SC fence in the current thread */
1827         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1828         ModelAction *last_sc_write = NULL;
1829         if (curr->is_seqcst())
1830                 last_sc_write = get_last_seq_cst_write(curr);
1831
1832         /* Iterate over all threads */
1833         for (i = 0; i < thrd_lists->size(); i++) {
1834                 /* Last SC fence in thread i */
1835                 ModelAction *last_sc_fence_thread_local = NULL;
1836                 if (int_to_id((int)i) != curr->get_tid())
1837                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1838
1839                 /* Last SC fence in thread i, before last SC fence in current thread */
1840                 ModelAction *last_sc_fence_thread_before = NULL;
1841                 if (last_sc_fence_local)
1842                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1843
1844                 /* Iterate over actions in thread, starting from most recent */
1845                 action_list_t *list = &(*thrd_lists)[i];
1846                 action_list_t::reverse_iterator rit;
1847                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1848                         ModelAction *act = *rit;
1849
1850                         /* Skip curr */
1851                         if (act == curr)
1852                                 continue;
1853                         /* Don't want to add reflexive edges on 'rf' */
1854                         if (act->equals(rf)) {
1855                                 if (act->happens_before(curr))
1856                                         break;
1857                                 else
1858                                         continue;
1859                         }
1860
1861                         if (act->is_write()) {
1862                                 /* C++, Section 29.3 statement 5 */
1863                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1864                                                 *act < *last_sc_fence_thread_local) {
1865                                         added = mo_graph->addEdge(act, rf) || added;
1866                                         break;
1867                                 }
1868                                 /* C++, Section 29.3 statement 4 */
1869                                 else if (act->is_seqcst() && last_sc_fence_local &&
1870                                                 *act < *last_sc_fence_local) {
1871                                         added = mo_graph->addEdge(act, rf) || added;
1872                                         break;
1873                                 }
1874                                 /* C++, Section 29.3 statement 6 */
1875                                 else if (last_sc_fence_thread_before &&
1876                                                 *act < *last_sc_fence_thread_before) {
1877                                         added = mo_graph->addEdge(act, rf) || added;
1878                                         break;
1879                                 }
1880                         }
1881
1882                         /* C++, Section 29.3 statement 3 (second subpoint) */
1883                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1884                                 added = mo_graph->addEdge(act, rf) || added;
1885                                 break;
1886                         }
1887
1888                         /*
1889                          * Include at most one act per-thread that "happens
1890                          * before" curr
1891                          */
1892                         if (act->happens_before(curr)) {
1893                                 if (act->is_write()) {
1894                                         added = mo_graph->addEdge(act, rf) || added;
1895                                 } else {
1896                                         const ModelAction *prevrf = act->get_reads_from();
1897                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1898                                         if (prevrf) {
1899                                                 if (!prevrf->equals(rf))
1900                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1901                                         } else if (!prevrf_promise->equals(rf)) {
1902                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1903                                         }
1904                                 }
1905                                 break;
1906                         }
1907                 }
1908         }
1909
1910         /*
1911          * All compatible, thread-exclusive promises must be ordered after any
1912          * concrete loads from the same thread
1913          */
1914         for (unsigned int i = 0; i < promises->size(); i++)
1915                 if ((*promises)[i]->is_compatible_exclusive(curr))
1916                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1917
1918         return added;
1919 }
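
/*
 * Concrete instance of case (1) above (illustrative client code): if a write
 * A happens before the current read, and the read takes its value from a
 * different write B, then A must precede B in the modification order
 * (write-read coherence).
 *
 *   std::atomic<int> x(0);
 *
 *   void thread1() {
 *           x.store(1, std::memory_order_relaxed);     // write A
 *           int r = x.load(std::memory_order_relaxed); // curr
 *           // r == 2 forces the edge A --mo--> B
 *   }
 *   void thread2() {
 *           x.store(2, std::memory_order_relaxed);     // write B
 *   }
 */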
1920
1921 /**
1922  * Updates the mo_graph with the constraints imposed from the current write.
1923  *
1924  * Basic idea is the following: Go through each other thread and find
1925  * the latest action that happened before our write.  Two cases:
1926  *
1927  * (1) The action is a write => that write must occur before
1928  * the current write
1929  *
1930  * (2) The action is a read => the write that that action read from
1931  * must occur before the current write.
1932  *
1933  * This method also handles two other issues:
1934  *
1935  * (I) Sequential Consistency: Making sure that if the current write is
1936  * seq_cst, that it occurs after the previous seq_cst write.
1937  *
1938  * (II) Sending the write back to non-synchronizing reads.
1939  *
1940  * @param curr The current action. Must be a write.
1941  * @param send_fv A vector for stashing reads to which we may pass our future
1942  * value. If NULL, then don't record any future values.
1943  * @return True if modification order edges were added; false otherwise
1944  */
1945 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1946 {
1947         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1948         unsigned int i;
1949         bool added = false;
1950         ASSERT(curr->is_write());
1951
1952         if (curr->is_seqcst()) {
1953                 /* A seq_cst write must come after the last seq_cst write to
1954                  * this location in the modification order. */
1955                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1956                 if (last_seq_cst != NULL) {
1957                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1958                 }
1959         }
1960
1961         /* Last SC fence in the current thread */
1962         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1963
1964         /* Iterate over all threads */
1965         for (i = 0; i < thrd_lists->size(); i++) {
1966                 /* Last SC fence in thread i, before last SC fence in current thread */
1967                 ModelAction *last_sc_fence_thread_before = NULL;
1968                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1969                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1970
1971                 /* Iterate over actions in thread, starting from most recent */
1972                 action_list_t *list = &(*thrd_lists)[i];
1973                 action_list_t::reverse_iterator rit;
1974                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1975                         ModelAction *act = *rit;
1976                         if (act == curr) {
1977                                 /*
1978                                  * 1) If RMW and it actually read from something, then we
1979                                  * already have all relevant edges, so just skip to next
1980                                  * thread.
1981                                  *
1982                                  * 2) If RMW and it didn't read from anything, we should add
1983                                  * whatever edge we can get to speed up convergence.
1984                                  *
1985                                  * 3) If normal write, we need to look at earlier actions, so
1986                                  * continue processing list.
1987                                  */
1988                                 if (curr->is_rmw()) {
1989                                         if (curr->get_reads_from() != NULL)
1990                                                 break;
1991                                         else
1992                                                 continue;
1993                                 } else
1994                                         continue;
1995                         }
1996
1997                         /* C++, Section 29.3 statement 7 */
1998                         if (last_sc_fence_thread_before && act->is_write() &&
1999                                         *act < *last_sc_fence_thread_before) {
2000                                 added = mo_graph->addEdge(act, curr) || added;
2001                                 break;
2002                         }
2003
2004                         /*
2005                          * Include at most one act per-thread that "happens
2006                          * before" curr
2007                          */
2008                         if (act->happens_before(curr)) {
2009                                 /*
2010                                  * Note: if act is RMW, just add edge:
2011                                  *   act --mo--> curr
2012                                  * The following edge should be handled elsewhere:
2013                                  *   readfrom(act) --mo--> act
2014                                  */
2015                                 if (act->is_write())
2016                                         added = mo_graph->addEdge(act, curr) || added;
2017                                 else if (act->is_read()) {
2018                                         //if previous read accessed a null, just keep going
2019                                         // if the previous read's source is not yet resolved (a promise), just keep going
2020                                                 continue;
2021                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2022                                 }
2023                                 break;
2024                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2025                                                      !act->same_thread(curr)) {
2026                                 /* We have an action that:
2027                                    (1) did not happen before us
2028                                    (2) is a read and we are a write
2029                                    (3) cannot synchronize with us
2030                                    (4) is in a different thread
2031                                    =>
2032                                    that read could potentially read from our write.  Note that
2033                                    these checks are overly conservative at this point; we'll
2034                                    do more checks before actually removing the
2035                                    pendingfuturevalue.
2036
2037                                  */
2038                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2039                                         if (!is_infeasible())
2040                                                 send_fv->push_back(act);
2041                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2042                                                 add_future_value(curr, act);
2043                                 }
2044                         }
2045                 }
2046         }
2047
2048         /*
2049          * All compatible, thread-exclusive promises must be ordered after any
2050          * concrete stores to the same thread, or else they can be merged with
2051          * concrete stores from the same thread, or else they can be merged with
2052          */
2053         for (unsigned int i = 0; i < promises->size(); i++)
2054                 if ((*promises)[i]->is_compatible_exclusive(curr))
2055                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2056
2057         return added;
2058 }
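
/*
 * Concrete instance of case (1) above (illustrative client code): two writes
 * to the same atomic that are ordered by happens-before must appear in that
 * order in the modification order (write-write coherence); the loop above adds
 * exactly this kind of edge.
 *
 *   std::atomic<int> y(0);
 *
 *   void thread1() {
 *           y.store(1, std::memory_order_relaxed);     // act
 *           y.store(2, std::memory_order_relaxed);     // curr
 *           // act happens before curr  =>  act --mo--> curr
 *   }
 */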
2059
2060 /** Arbitrary reads from the future are not allowed.  Section 29.3
2061  * part 9 places some constraints.  This method checks one result of constraint
2062  * constraint.  Others require compiler support. */
2063 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2064 {
2065         if (!writer->is_rmw())
2066                 return true;
2067
2068         if (!reader->is_rmw())
2069                 return true;
2070
2071         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2072                 if (search == reader)
2073                         return false;
2074                 if (search->get_tid() == reader->get_tid() &&
2075                                 search->happens_before(reader))
2076                         break;
2077         }
2078
2079         return true;
2080 }
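
/*
 * Rough sketch of the shape this rejects (illustrative): an RMW writer whose
 * own value already came, through a reads-from chain, from the very RMW
 * reader it would feed. Allowing that would form a reads-from cycle among
 * RMWs.
 *
 *   std::atomic<int> z(0);
 *
 *   void thread1() { z.fetch_add(1, std::memory_order_relaxed); }  // R1
 *   void thread2() { z.fetch_add(1, std::memory_order_relaxed); }  // R2
 *
 * If R2 already reads from R1, then proposing R1 as a reader of R2's (future)
 * value is disallowed here.
 */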
2081
2082 /**
2083  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2084  * some constraints. This method checks the following constraint (others
2085  * require compiler support):
2086  *
2087  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2088  */
2089 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2090 {
2091         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2092         unsigned int i;
2093         /* Iterate over all threads */
2094         for (i = 0; i < thrd_lists->size(); i++) {
2095                 const ModelAction *write_after_read = NULL;
2096
2097                 /* Iterate over actions in thread, starting from most recent */
2098                 action_list_t *list = &(*thrd_lists)[i];
2099                 action_list_t::reverse_iterator rit;
2100                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2101                         ModelAction *act = *rit;
2102
2103                         /* Don't disallow due to act == reader */
2104                         if (!reader->happens_before(act) || reader == act)
2105                                 break;
2106                         else if (act->is_write())
2107                                 write_after_read = act;
2108                         else if (act->is_read() && act->get_reads_from() != NULL)
2109                                 write_after_read = act->get_reads_from();
2110                 }
2111
2112                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2113                         return false;
2114         }
2115         return true;
2116 }
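
/*
 * Sketch of the forbidden shape checked above (illustrative client code): a
 * reader X that happens before some write Y must not read from a write Z that
 * is modification-ordered after Y (read-write coherence).
 *
 *   std::atomic<int> v(0);
 *
 *   void thread1() {
 *           int r = v.load(std::memory_order_relaxed);  // X (reader)
 *           v.store(1, std::memory_order_relaxed);      // Y
 *   }
 *   void thread2() {
 *           v.store(2, std::memory_order_relaxed);      // Z (writer)
 *   }
 *
 * If Y --mo--> Z, then r == 2 (X reading from Z) is not allowed.
 */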
2117
2118 /**
2119  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2120  * The ModelAction under consideration is expected to be taking part in
2121  * release/acquire synchronization as an object of the "reads from" relation.
2122  * Note that this can only provide release sequence support for RMW chains
2123  * which do not read from the future, as those actions cannot be traced until
2124  * their "promise" is fulfilled. Similarly, we may not even establish the
2125  * presence of a release sequence with certainty, as some modification order
2126  * constraints may be decided further in the future. Thus, this function
2127  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2128  * and a boolean representing certainty.
2129  *
2130  * @param rf The action that might be part of a release sequence. Must be a
2131  * write.
2132  * @param release_heads A pass-by-reference style return parameter. After
2133  * execution of this function, release_heads will contain the heads of all the
2134  * relevant release sequences, if any exist with certainty
2135  * @param pending A pass-by-reference style return parameter which is only used
2136  * when returning false (i.e., uncertain). Records the information needed to
2137  * re-check the uncertain release sequence later, including any write operations that might
2138  * break the sequence.
2139  * @return true, if the ModelChecker is certain that release_heads is complete;
2140  * false otherwise
2141  */
2142 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2143                 rel_heads_list_t *release_heads,
2144                 struct release_seq *pending) const
2145 {
2146         /* Only check for release sequences if there are no cycles */
2147         if (mo_graph->checkForCycles())
2148                 return false;
2149
2150         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2151                 ASSERT(rf->is_write());
2152
2153                 if (rf->is_release())
2154                         release_heads->push_back(rf);
2155                 else if (rf->get_last_fence_release())
2156                         release_heads->push_back(rf->get_last_fence_release());
2157                 if (!rf->is_rmw())
2158                         break; /* End of RMW chain */
2159
2160                 /** @todo Need to be smarter here...  In the linux lock
2161                  * example, this will run to the beginning of the program for
2162                  * every acquire. */
2163                 /** @todo The way to be smarter here is to keep going until 1
2164                  * thread has a release preceded by an acquire and you've seen
2165                  *       both. */
2166
2167                 /* acq_rel RMW is a sufficient stopping condition */
2168                 if (rf->is_acquire() && rf->is_release())
2169                         return true; /* complete */
2170         }
2171         if (!rf) {
2172                 /* read from future: need to settle this later */
2173                 pending->rf = NULL;
2174                 return false; /* incomplete */
2175         }
2176
2177         if (rf->is_release())
2178                 return true; /* complete */
2179
2180         /* else relaxed write
2181          * - check for fence-release in the same thread (29.8, stmt. 3)
2182          * - check modification order for contiguous subsequence
2183          *   -> rf must be same thread as release */
2184
2185         const ModelAction *fence_release = rf->get_last_fence_release();
2186         /* Synchronize with a fence-release unconditionally; we don't need to
2187          * find any more "contiguous subsequence..." for it */
2188         if (fence_release)
2189                 release_heads->push_back(fence_release);
2190
2191         int tid = id_to_int(rf->get_tid());
2192         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2193         action_list_t *list = &(*thrd_lists)[tid];
2194         action_list_t::const_reverse_iterator rit;
2195
2196         /* Find rf in the thread list */
2197         rit = std::find(list->rbegin(), list->rend(), rf);
2198         ASSERT(rit != list->rend());
2199
2200         /* Find the last {write,fence}-release */
2201         for (; rit != list->rend(); rit++) {
2202                 if (fence_release && *(*rit) < *fence_release)
2203                         break;
2204                 if ((*rit)->is_release())
2205                         break;
2206         }
2207         if (rit == list->rend()) {
2208                 /* No write-release in this thread */
2209                 return true; /* complete */
2210         } else if (fence_release && *(*rit) < *fence_release) {
2211                 /* The fence-release is more recent (and so, "stronger") than
2212                  * the most recent write-release */
2213                 return true; /* complete */
2214         } /* else, need to establish contiguous release sequence */
2215         ModelAction *release = *rit;
2216
2217         ASSERT(rf->same_thread(release));
2218
2219         pending->writes.clear();
2220
2221         bool certain = true;
2222         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2223                 if (id_to_int(rf->get_tid()) == (int)i)
2224                         continue;
2225                 list = &(*thrd_lists)[i];
2226
2227                 /* Can we ensure no future writes from this thread may break
2228                  * the release seq? */
2229                 bool future_ordered = false;
2230
2231                 ModelAction *last = get_last_action(int_to_id(i));
2232                 Thread *th = get_thread(int_to_id(i));
2233                 if ((last && rf->happens_before(last)) ||
2234                                 !is_enabled(th) ||
2235                                 th->is_complete())
2236                         future_ordered = true;
2237
2238                 ASSERT(!th->is_model_thread() || future_ordered);
2239
2240                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2241                         const ModelAction *act = *rit;
2242                         /* Reach synchronization -> this thread is complete */
2243                         if (act->happens_before(release))
2244                                 break;
2245                         if (rf->happens_before(act)) {
2246                                 future_ordered = true;
2247                                 continue;
2248                         }
2249
2250                         /* Only non-RMW writes can break release sequences */
2251                         if (!act->is_write() || act->is_rmw())
2252                                 continue;
2253
2254                         /* Check modification order */
2255                         if (mo_graph->checkReachable(rf, act)) {
2256                                 /* rf --mo--> act */
2257                                 future_ordered = true;
2258                                 continue;
2259                         }
2260                         if (mo_graph->checkReachable(act, release))
2261                                 /* act --mo--> release */
2262                                 break;
2263                         if (mo_graph->checkReachable(release, act) &&
2264                                       mo_graph->checkReachable(act, rf)) {
2265                                 /* release --mo-> act --mo--> rf */
2266                                 return true; /* complete */
2267                         }
2268                         /* act may break release sequence */
2269                         pending->writes.push_back(act);
2270                         certain = false;
2271                 }
2272                 if (!future_ordered)
2273                         certain = false; /* This thread is uncertain */
2274         }
2275
2276         if (certain) {
2277                 release_heads->push_back(release);
2278                 pending->writes.clear();
2279         } else {
2280                 pending->release = release;
2281                 pending->rf = rf;
2282         }
2283         return certain;
2284 }
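
/*
 * For reference, a release sequence under the C++11 rules reflected above
 * (illustrative client code): the sequence headed by a release store may be
 * extended by later stores from the same thread and by RMWs from any thread,
 * as long as they form a contiguous subsequence of the modification order; an
 * acquire load reading anywhere in the sequence synchronizes with the head.
 *
 *   std::atomic<int> s(0);
 *
 *   void thread1() {
 *           s.store(1, std::memory_order_release);      // head
 *           s.store(2, std::memory_order_relaxed);      // same thread: extends it
 *   }
 *   void thread2() {
 *           s.fetch_add(1, std::memory_order_relaxed);  // RMW: extends it
 *   }
 *   void thread3() {
 *           if (s.load(std::memory_order_acquire) == 3)
 *                   ;  // still synchronizes with thread1's release store
 *   }
 */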
2285
2286 /**
2287  * An interface for getting the release sequence head(s) with which a
2288  * given ModelAction must synchronize. This function only returns a non-empty
2289  * result when it can locate a release sequence head with certainty. Otherwise,
2290  * it may mark the internal state of the ModelChecker so that it will handle
2291  * the release sequence at a later time, causing @a acquire to update its
2292  * synchronization at some later point in execution.
2293  *
2294  * @param acquire The 'acquire' action that may synchronize with a release
2295  * sequence
2296  * @param read The read action that may read from a release sequence; this may
2297  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2298  * when 'acquire' is a fence-acquire)
2299  * @param release_heads A pass-by-reference return parameter. Will be filled
2300  * with the head(s) of the release sequence(s), if they exist with certainty.
2301  * @see ModelChecker::release_seq_heads
2302  */
2303 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2304                 ModelAction *read, rel_heads_list_t *release_heads)
2305 {
2306         const ModelAction *rf = read->get_reads_from();
2307         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2308         sequence->acquire = acquire;
2309         sequence->read = read;
2310
2311         if (!release_seq_heads(rf, release_heads, sequence)) {
2312                 /* add act to 'lazy checking' list */
2313                 pending_rel_seqs->push_back(sequence);
2314         } else {
2315                 snapshot_free(sequence);
2316         }
2317 }
2318
2319 /**
2320  * Attempt to resolve all stashed operations that might synchronize with a
2321  * release sequence for a given location. This implements the "lazy" portion of
2322  * determining whether or not a release sequence was contiguous, since not all
2323  * modification order information is present at the time an action occurs.
2324  *
2325  * @param location The location/object that should be checked for release
2326  * sequence resolutions. A NULL value means to check all locations.
2327  * @param work_queue The work queue to which to add work items as they are
2328  * generated
2329  * @return True if any updates occurred (new synchronization, new mo_graph
2330  * edges)
2331  */
2332 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2333 {
2334         bool updated = false;
2335         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2336         while (it != pending_rel_seqs->end()) {
2337                 struct release_seq *pending = *it;
2338                 ModelAction *acquire = pending->acquire;
2339                 const ModelAction *read = pending->read;
2340
2341                 /* Only resolve sequences on the given location, if provided */
2342                 if (location && read->get_location() != location) {
2343                         it++;
2344                         continue;
2345                 }
2346
2347                 const ModelAction *rf = read->get_reads_from();
2348                 rel_heads_list_t release_heads;
2349                 bool complete;
2350                 complete = release_seq_heads(rf, &release_heads, pending);
2351                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2352                         if (!acquire->has_synchronized_with(release_heads[i])) {
2353                                 if (acquire->synchronize_with(release_heads[i]))
2354                                         updated = true;
2355                                 else
2356                                         set_bad_synchronization();
2357                         }
2358                 }
2359
2360                 if (updated) {
2361                         /* Re-check all pending release sequences */
2362                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2363                         /* Re-check read-acquire for mo_graph edges */
2364                         if (acquire->is_read())
2365                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2366
2367                         /* propagate synchronization to later actions */
2368                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2369                         for (; (*rit) != acquire; rit++) {
2370                                 ModelAction *propagate = *rit;
2371                                 if (acquire->happens_before(propagate)) {
2372                                         propagate->synchronize_with(acquire);
2373                                         /* Re-check 'propagate' for mo_graph edges */
2374                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2375                                 }
2376                         }
2377                 }
2378                 if (complete) {
2379                         it = pending_rel_seqs->erase(it);
2380                         snapshot_free(pending);
2381                 } else {
2382                         it++;
2383                 }
2384         }
2385
2386         // Resolving release sequences may have added synchronization; see if we have realized a data race.
2387         checkDataRaces();
2388
2389         return updated;
2390 }
2391
2392 /**
2393  * Performs various bookkeeping operations for the current ModelAction. For
2394  * instance, adds action to the per-object, per-thread action vector and to the
2395  * action trace list of all thread actions.
2396  *
2397  * @param act is the ModelAction to add.
2398  */
2399 void ModelChecker::add_action_to_lists(ModelAction *act)
2400 {
2401         int tid = id_to_int(act->get_tid());
2402         ModelAction *uninit = NULL;
2403         int uninit_id = -1;
2404         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2405         if (list->empty() && act->is_atomic_var()) {
2406                 uninit = get_uninitialized_action(act);
2407                 uninit_id = id_to_int(uninit->get_tid());
2408                 list->push_front(uninit);
2409         }
2410         list->push_back(act);
2411
2412         action_trace->push_back(act);
2413         if (uninit)
2414                 action_trace->push_front(uninit);
2415
2416         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2417         if (tid >= (int)vec->size())
2418                 vec->resize(priv->next_thread_id);
2419         (*vec)[tid].push_back(act);
2420         if (uninit)
2421                 (*vec)[uninit_id].push_front(uninit);
2422
2423         if ((int)thrd_last_action->size() <= tid)
2424                 thrd_last_action->resize(get_num_threads());
2425         (*thrd_last_action)[tid] = act;
2426         if (uninit)
2427                 (*thrd_last_action)[uninit_id] = uninit;
2428
2429         if (act->is_fence() && act->is_release()) {
2430                 if ((int)thrd_last_fence_release->size() <= tid)
2431                         thrd_last_fence_release->resize(get_num_threads());
2432                 (*thrd_last_fence_release)[tid] = act;
2433         }
2434
2435         if (act->is_wait()) {
2436                 void *mutex_loc = (void *) act->get_value();
2437                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2438
2439                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2440                 if (tid >= (int)vec->size())
2441                         vec->resize(priv->next_thread_id);
2442                 (*vec)[tid].push_back(act);
2443         }
2444 }
2445
2446 /**
2447  * @brief Get the last action performed by a particular Thread
2448  * @param tid The thread ID of the Thread in question
2449  * @return The last action in the thread
2450  */
2451 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2452 {
2453         int threadid = id_to_int(tid);
2454         if (threadid < (int)thrd_last_action->size())
2455                 return (*thrd_last_action)[id_to_int(tid)];
2456         else
2457                 return NULL;
2458 }
2459
2460 /**
2461  * @brief Get the last fence release performed by a particular Thread
2462  * @param tid The thread ID of the Thread in question
2463  * @return The last fence release in the thread, if one exists; NULL otherwise
2464  */
2465 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2466 {
2467         int threadid = id_to_int(tid);
2468         if (threadid < (int)thrd_last_fence_release->size())
2469                 return (*thrd_last_fence_release)[id_to_int(tid)];
2470         else
2471                 return NULL;
2472 }
2473
2474 /**
2475  * Gets the last memory_order_seq_cst write (in the total global sequence)
2476  * performed on a particular object (i.e., memory location), not including the
2477  * current action.
2478  * @param curr The current ModelAction; also denotes the object location to
2479  * check
2480  * @return The last seq_cst write
2481  */
2482 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2483 {
2484         void *location = curr->get_location();
2485         action_list_t *list = get_safe_ptr_action(obj_map, location);
2486         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2487         action_list_t::reverse_iterator rit;
2488         for (rit = list->rbegin(); (*rit) != curr; rit++)
2489                 ;
2490         rit++; /* Skip past curr */
2491         for ( ; rit != list->rend(); rit++)
2492                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2493                         return *rit;
2494         return NULL;
2495 }
2496
2497 /**
2498  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2499  * performed in a particular thread, prior to a particular fence.
2500  * @param tid The ID of the thread to check
2501  * @param before_fence The fence from which to begin the search; if NULL, then
2502  * search for the most recent fence in the thread.
2503  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2504  */
2505 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2506 {
2507         /* All fences should have NULL location */
2508         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2509         action_list_t::reverse_iterator rit = list->rbegin();
2510
2511         if (before_fence) {
2512                 for (; rit != list->rend(); rit++)
2513                         if (*rit == before_fence)
2514                                 break;
2515
2516                 ASSERT(*rit == before_fence);
2517                 rit++;
2518         }
2519
2520         for (; rit != list->rend(); rit++)
2521                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2522                         return *rit;
2523         return NULL;
2524 }
2525
2526 /**
2527  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2528  * location). This function identifies the mutex according to the current
2529  * action, which is presumed to operate on the same mutex.
2530  * @param curr The current ModelAction; also denotes the object location to
2531  * check
2532  * @return The last unlock operation
2533  */
2534 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2535 {
2536         void *location = curr->get_location();
2537         action_list_t *list = get_safe_ptr_action(obj_map, location);
2538         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2539         action_list_t::reverse_iterator rit;
2540         for (rit = list->rbegin(); rit != list->rend(); rit++)
2541                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2542                         return *rit;
2543         return NULL;
2544 }
2545
2546 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2547 {
2548         ModelAction *parent = get_last_action(tid);
2549         if (!parent)
2550                 parent = get_thread(tid)->get_creation();
2551         return parent;
2552 }
2553
2554 /**
2555  * Returns the clock vector for a given thread.
2556  * @param tid The thread whose clock vector we want
2557  * @return Desired clock vector
2558  */
2559 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2560 {
2561         return get_parent_action(tid)->get_cv();
2562 }
2563
2564 /**
2565  * @brief Find the promise (if any) to resolve for the current action and
2566  * remove it from the pending promise vector
2567  * @param curr The current ModelAction. Should be a write.
2568  * @return The Promise to resolve, if any; otherwise NULL
2569  */
2570 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2571 {
2572         for (unsigned int i = 0; i < promises->size(); i++)
2573                 if (curr->get_node()->get_promise(i)) {
2574                         Promise *ret = (*promises)[i];
2575                         promises->erase(promises->begin() + i);
2576                         return ret;
2577                 }
2578         return NULL;
2579 }
2580
2581 /**
2582  * Resolve a Promise with a current write.
2583  * @param write The ModelAction that is fulfilling Promises
2584  * @param promise The Promise to resolve
2585  * @return True if the Promise was successfully resolved; false otherwise
2586  */
2587 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2588 {
2589         ModelVector<ModelAction *> actions_to_check;
2590
2591         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2592                 ModelAction *read = promise->get_reader(i);
2593                 read_from(read, write);
2594                 actions_to_check.push_back(read);
2595         }
2596         /* Make sure the promise's value matches the write's value */
2597         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2598         if (!mo_graph->resolvePromise(promise, write))
2599                 priv->failed_promise = true;
2600
2601         /**
2602          * @todo  It is possible to end up in an inconsistent state, where a
2603          * "resolved" promise may still be referenced if
2604          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2605          *
2606          * Note that the inconsistency only matters when dumping mo_graph to
2607          * file.
2608          *
2609          * delete promise;
2610          */
2611
2612         // Check whether reading these writes has made threads unable to
2613         // resolve promises
2614         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2615                 ModelAction *read = actions_to_check[i];
2616                 mo_check_promises(read, true);
2617         }
2618
2619         return true;
2620 }
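/*
 * Usage sketch (assumption): the two promise helpers above are normally used
 * as a pair when a write is processed: pop the Promise selected by the
 * current Node, then resolve it against the write. The snippet below is
 * illustrative only; the real call site is the write-processing code
 * elsewhere in this file.
 *
 *   // inside ModelChecker, while processing a write action 'curr':
 *   Promise *promise = pop_promise_to_resolve(curr);
 *   if (promise)
 *           resolve_promise(curr, promise);
 */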
2621
2622 /**
2623  * Compute the set of promises that could potentially be satisfied by this
2624  * action. Note that the set computation actually appears in the Node, not in
2625  * ModelChecker.
2626  * @param curr The ModelAction that may satisfy promises
2627  */
2628 void ModelChecker::compute_promises(ModelAction *curr)
2629 {
2630         for (unsigned int i = 0; i < promises->size(); i++) {
2631                 Promise *promise = (*promises)[i];
2632                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2633                         continue;
2634
2635                 bool satisfy = true;
2636                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2637                         const ModelAction *act = promise->get_reader(j);
2638                         if (act->happens_before(curr) ||
2639                                         act->could_synchronize_with(curr)) {
2640                                 satisfy = false;
2641                                 break;
2642                         }
2643                 }
2644                 if (satisfy)
2645                         curr->get_node()->set_promise(i);
2646         }
2647 }
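/*
 * Worked example (illustrative): suppose promises[0] records a read R of 'x'
 * in thread 2 that speculatively expects the value 1, and curr is a store of
 * 1 to 'x' by thread 1. If R neither happens before curr nor could
 * synchronize with curr, then curr could still be ordered so that R reads
 * from it, so curr's Node marks the promise as potentially satisfiable via
 * set_promise(0). Otherwise the promise is skipped for this action.
 */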
2648
2649 /** Checks promises in response to a change in a thread's ClockVector. */
2650 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2651 {
2652         for (unsigned int i = 0; i < promises->size(); i++) {
2653                 Promise *promise = (*promises)[i];
2654                 if (!promise->thread_is_available(tid))
2655                         continue;
2656                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2657                         const ModelAction *act = promise->get_reader(j);
2658                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2659                                         merge_cv->synchronized_since(act)) {
2660                                 if (promise->eliminate_thread(tid)) {
2661                                         /* Promise has failed */
2662                                         priv->failed_promise = true;
2663                                         return;
2664                                 }
2665                         }
2666                 }
2667         }
2668 }
2669
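/**
 * @brief Check pending promises after a thread has been disabled
 *
 * Scans all pending promises and, if any of them can no longer be satisfied
 * (Promise::has_failed()), records the failure in the snapshotted state.
 */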
2670 void ModelChecker::check_promises_thread_disabled()
2671 {
2672         for (unsigned int i = 0; i < promises->size(); i++) {
2673                 Promise *promise = (*promises)[i];
2674                 if (promise->has_failed()) {
2675                         priv->failed_promise = true;
2676                         return;
2677                 }
2678         }
2679 }
2680
2681 /**
2682  * @brief Checks whether threads can still satisfy promises after an
2683  * addition to the modification order
2684  *
2685  * We test whether threads are still available for satisfying promises after an
2686  * addition to our modification order constraints. Those that are unavailable
2687  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2688  * that promise has failed.
2689  *
2690  * @param act The ModelAction which updated the modification order
2691  * @param is_read_check Should be true if act is a read and we must check for
2692  * updates to the store from which it read (there is a distinction here for
2693  * RMWs, which are both a load and a store)
2694  */
2695 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2696 {
2697         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2698
2699         for (unsigned int i = 0; i < promises->size(); i++) {
2700                 Promise *promise = (*promises)[i];
2701
2702                 // Is this promise on the same location?
2703                 if (!promise->same_location(write))
2704                         continue;
2705
2706                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2707                         const ModelAction *pread = promise->get_reader(j);
2708                         if (!pread->happens_before(act))
2709                                 continue;
2710                         if (mo_graph->checkPromise(write, promise)) {
2711                                 priv->failed_promise = true;
2712                                 return;
2713                         }
2714                         break;
2715                 }
2716
2717                 // Don't do any lookups twice for the same thread
2718                 if (!promise->thread_is_available(act->get_tid()))
2719                         continue;
2720
2721                 if (mo_graph->checkReachable(promise, write)) {
2722                         if (mo_graph->checkPromise(write, promise)) {
2723                                 priv->failed_promise = true;
2724                                 return;
2725                         }
2726                 }
2727         }
2728 }
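/*
 * Illustrative scenario (assumption): threads 1 and 2 are the only threads
 * still available to satisfy a promise P on location 'x'. A new store W by
 * thread 1 is ordered (in the modification order) such that thread 1 can no
 * longer provide the promised value, so checkPromise(W, P) eliminates thread
 * 1. If a later update eliminates thread 2 as well, P has failed and the
 * execution is flagged via priv->failed_promise.
 */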
2729
2730 /**
2731  * Compute the set of writes that may break the current pending release
2732  * sequence. This information is extracted from previous release sequence
2733  * calculations.
2734  *
2735  * @param curr The current ModelAction. Must be a release sequence fixup
2736  * action.
2737  */
2738 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2739 {
2740         if (pending_rel_seqs->empty())
2741                 return;
2742
2743         struct release_seq *pending = pending_rel_seqs->back();
2744         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2745                 const ModelAction *write = pending->writes[i];
2746                 curr->get_node()->add_relseq_break(write);
2747         }
2748
2749         /* NULL means don't break the sequence; just synchronize */
2750         curr->get_node()->add_relseq_break(NULL);
2751 }
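/*
 * Illustrative note (assumption): if the pending release sequence recorded
 * candidate interfering writes {W1, W2}, the fixup Node is given three
 * choices: break the sequence at W1, break it at W2, or NULL (meaning do not
 * break the sequence, just synchronize). Backtracking can then explore each
 * choice in a separate execution.
 */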
2752
2753 /**
2754  * Build up an initial set of all past writes that this 'read' action may read
2755  * from, as well as any previously-observed future values that must still be valid.
2756  *
2757  * @param curr is the current ModelAction that we are exploring; it must be a
2758  * 'read' operation.
2759  */
2760 void ModelChecker::build_may_read_from(ModelAction *curr)
2761 {
2762         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2763         unsigned int i;
2764         ASSERT(curr->is_read());
2765
2766         ModelAction *last_sc_write = NULL;
2767
2768         if (curr->is_seqcst())
2769                 last_sc_write = get_last_seq_cst_write(curr);
2770
2771         /* Iterate over all threads */
2772         for (i = 0; i < thrd_lists->size(); i++) {
2773                 /* Iterate over actions in thread, starting from most recent */
2774                 action_list_t *list = &(*thrd_lists)[i];
2775                 action_list_t::reverse_iterator rit;
2776                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2777                         ModelAction *act = *rit;
2778
2779                         /* Only consider 'write' actions */
2780                         if (!act->is_write() || act == curr)
2781                                 continue;
2782
2783                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2784                         bool allow_read = true;
2785
2786                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2787                                 allow_read = false;
2788                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2789                                 allow_read = false;
2790
2791                         if (allow_read) {
2792                                 /* Only add feasible reads */
2793                                 mo_graph->startChanges();
2794                                 r_modification_order(curr, act);
2795                                 if (!is_infeasible())
2796                                         curr->get_node()->add_read_from_past(act);
2797                                 mo_graph->rollbackChanges();
2798                         }
2799
2800                         /* Include at most one act per thread that "happens before" curr */
2801                         if (act->happens_before(curr))
2802                                 break;
2803                 }
2804         }
2805
2806         /* Inherit existing, promised future values */
2807         for (i = 0; i < promises->size(); i++) {
2808                 const Promise *promise = (*promises)[i];
2809                 const ModelAction *promise_read = promise->get_reader(0);
2810                 if (promise_read->same_var(curr)) {
2811                         /* Only add feasible future-values */
2812                         mo_graph->startChanges();
2813                         r_modification_order(curr, promise);
2814                         if (!is_infeasible())
2815                                 curr->get_node()->add_read_from_promise(promise_read);
2816                         mo_graph->rollbackChanges();
2817                 }
2818         }
2819
2820         /* We may find no valid may-read-from set only if the execution is doomed */
2821         if (!curr->get_node()->read_from_size()) {
2822                 priv->no_valid_reads = true;
2823                 set_assert();
2824         }
2825
2826         if (DBG_ENABLED()) {
2827                 model_print("Reached read action:\n");
2828                 curr->print();
2829                 model_print("Printing read_from_past\n");
2830                 curr->get_node()->print_read_from_past();
2831                 model_print("End printing read_from_past\n");
2832         }
2833 }
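/*
 * Worked example (illustrative): for the trace
 *
 *   T1: W_relaxed(x, 1); W_seqcst(x, 2);
 *   T2: R_seqcst(x);     <-- curr
 *
 * the last SC write is the store of 2, so the SC read in T2 may read from it
 * but not from the store of 1, which happens before the last SC write. If the
 * read were not seq_cst, both stores would remain candidates (subject to the
 * feasibility check above). Each surviving candidate is recorded with
 * add_read_from_past().
 */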
2834
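/**
 * @brief Check whether a sleeping thread's read should consider a given write
 *
 * Used only for reads carrying the sleep flag (see build_may_read_from()).
 * Walks backward through the candidate write's RMW/reads-from chain, looking
 * for a release store performed while the reading thread was in the sleep
 * set.
 *
 * @param curr The read being explored
 * @param write The candidate write
 * @return True if the read may read from the write
 */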
2835 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2836 {
2837         for ( ; write != NULL; write = write->get_reads_from()) {
2838                 /* UNINIT actions don't have a Node, and they never sleep */
2839                 if (write->is_uninitialized())
2840                         return true;
2841                 Node *prevnode = write->get_node()->get_parent();
2842
2843                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2844                 if (write->is_release() && thread_sleep)
2845                         return true;
2846                 if (!write->is_rmw())
2847                         return false;
2848         }
2849         return true;
2850 }
2851
2852 /**
2853  * @brief Get an action representing an uninitialized atomic
2854  *
2855  * This function retrieves an existing one from the NodeStack, if available, or else creates a new one.
2856  *
2857  * @param curr The current action, which prompts the creation of an UNINIT action
2858  * @return A pointer to the UNINIT ModelAction
2859  */
2860 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2861 {
2862         Node *node = curr->get_node();
2863         ModelAction *act = node->get_uninit_action();
2864         if (!act) {
2865                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2866                 node->set_uninit_action(act);
2867         }
2868         act->create_cv(NULL);
2869         return act;
2870 }
2871
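/** @brief Print a list of ModelActions along with a hash of the list contents */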
2872 static void print_list(action_list_t *list)
2873 {
2874         action_list_t::iterator it;
2875
2876         model_print("---------------------------------------------------------------------\n");
2877
2878         unsigned int hash = 0;
2879
2880         for (it = list->begin(); it != list->end(); it++) {
2881                 const ModelAction *act = *it;
2882                 if (act->get_seq_number() > 0)
2883                         act->print();
2884                 hash = hash^(hash<<3)^((*it)->hash());
2885         }
2886         model_print("HASH %u\n", hash);
2887         model_print("---------------------------------------------------------------------\n");
2888 }
2889
2890 #if SUPPORT_MOD_ORDER_DUMP
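/**
 * @brief Dump the modification-order graph, plus reads-from and
 * sequenced-before edges for the current execution, in Graphviz "dot" format
 * @param filename The base name of the output file (".dot" is appended)
 */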
2891 void ModelChecker::dumpGraph(char *filename) const
2892 {
2893         char buffer[200];
2894         sprintf(buffer, "%s.dot", filename);
2895         FILE *file = fopen(buffer, "w");
2896         fprintf(file, "digraph %s {\n", filename);
2897         mo_graph->dumpNodes(file);
2898         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2899
2900         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2901                 ModelAction *act = *it;
2902                 if (act->is_read()) {
2903                         mo_graph->dot_print_node(file, act);
2904                         if (act->get_reads_from())
2905                                 mo_graph->dot_print_edge(file,
2906                                                 act->get_reads_from(),
2907                                                 act,
2908                                                 "label=\"rf\", color=red, weight=2");
2909                         else
2910                                 mo_graph->dot_print_edge(file,
2911                                                 act->get_reads_from_promise(),
2912                                                 act,
2913                                                 "label=\"rf\", color=red");
2914                 }
2915                 if (thread_array[id_to_int(act->get_tid())]) {
2916                         mo_graph->dot_print_edge(file,
2917                                         thread_array[id_to_int(act->get_tid())],
2918                                         act,
2919                                         "label=\"sb\", color=blue, weight=400");
2920                 }
2921
2922                 thread_array[id_to_int(act->get_tid())] = act;
2923         }
2924         fprintf(file, "}\n");
2925         model_free(thread_array);
2926         fclose(file);
2927 }
2928 #endif
2929
2930 /** @brief Prints an execution trace summary. */
2931 void ModelChecker::print_summary() const
2932 {
2933 #if SUPPORT_MOD_ORDER_DUMP
2934         char buffername[100];
2935         sprintf(buffername, "exec%04u", stats.num_total);
2936         mo_graph->dumpGraphToFile(buffername);
2937         sprintf(buffername, "graph%04u", stats.num_total);
2938         dumpGraph(buffername);
2939 #endif
2940
2941         model_print("Execution %d:", stats.num_total);
2942         if (isfeasibleprefix()) {
2943                 if (scheduler->all_threads_sleeping())
2944                         model_print(" SLEEP-SET REDUNDANT");
2945                 model_print("\n");
2946         } else
2947                 print_infeasibility(" INFEASIBLE");
2948         print_list(action_trace);
2949         model_print("\n");
2950         if (!promises->empty()) {
2951                 model_print("Pending promises:\n");
2952                 for (unsigned int i = 0; i < promises->size(); i++) {
2953                         model_print(" [P%u] ", i);
2954                         (*promises)[i]->print();
2955                 }
2956                 model_print("\n");
2957         }
2958 }
2959
2960 /**
2961  * Add a Thread to the system for the first time. Should only be called once
2962  * per thread.
2963  * @param t The Thread to add
2964  */
2965 void ModelChecker::add_thread(Thread *t)
2966 {
2967         thread_map->put(id_to_int(t->get_id()), t);
2968         scheduler->add_thread(t);
2969 }
2970
2971 /**
2972  * @brief Get a Thread reference by its ID
2973  * @param tid The Thread's ID
2974  * @return A Thread reference
2975  */
2976 Thread * ModelChecker::get_thread(thread_id_t tid) const
2977 {
2978         return thread_map->get(id_to_int(tid));
2979 }
2980
2981 /**
2982  * @brief Get a reference to the Thread in which a ModelAction was executed
2983  * @param act The ModelAction
2984  * @return A Thread reference
2985  */
2986 Thread * ModelChecker::get_thread(const ModelAction *act) const
2987 {
2988         return get_thread(act->get_tid());
2989 }
2990
2991 /**
2992  * @brief Get a Promise's "promise number"
2993  *
2994  * A "promise number" is an index number that is unique to a promise, valid
2995  * only for a specific snapshot of an execution trace. Promises may come and go
2996  * as they are generated and resolved, so an index only retains meaning for the
2997  * current snapshot.
2998  *
2999  * @param promise The Promise to check
3000  * @return The promise index, if the promise still is valid; otherwise -1
3001  */
3002 int ModelChecker::get_promise_number(const Promise *promise) const
3003 {
3004         for (unsigned int i = 0; i < promises->size(); i++)
3005                 if ((*promises)[i] == promise)
3006                         return i;
3007         /* Not found */
3008         return -1;
3009 }
3010
3011 /**
3012  * @brief Check if a Thread is currently enabled
3013  * @param t The Thread to check
3014  * @return True if the Thread is currently enabled
3015  */
3016 bool ModelChecker::is_enabled(Thread *t) const
3017 {
3018         return scheduler->is_enabled(t);
3019 }
3020
3021 /**
3022  * @brief Check if a Thread is currently enabled
3023  * @param tid The ID of the Thread to check
3024  * @return True if the Thread is currently enabled
3025  */
3026 bool ModelChecker::is_enabled(thread_id_t tid) const
3027 {
3028         return scheduler->is_enabled(tid);
3029 }
3030
3031 /**
3032  * Switch from a model-checker context to a user-thread context. This is the
3033  * complement of ModelChecker::switch_to_master and must be called from the
3034  * model-checker context
3035  *
3036  * @param thread The user-thread to switch to
3037  */
3038 void ModelChecker::switch_from_master(Thread *thread)
3039 {
3040         scheduler->set_current_thread(thread);
3041         Thread::swap(&system_context, thread);
3042 }
3043
3044 /**
3045  * Switch from a user context to the "master thread" context (a.k.a. system
3046  * context). This switch is made with the intention of exploring a particular
3047  * model-checking action (described by a ModelAction object). Must be called
3048  * from a user-thread context.
3049  *
3050  * @param act The current action that will be explored. May be NULL only if
3051  * trace is exiting via an assertion (see ModelChecker::set_assert and
3052  * ModelChecker::has_asserted).
3053  * @return Return the value returned by the current action
3054  */
3055 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3056 {
3057         DBG();
3058         Thread *old = thread_current();
3059         scheduler->set_current_thread(NULL);
3060         ASSERT(!old->get_pending());
3061         old->set_pending(act);
3062         if (Thread::swap(old, &system_context) < 0) {
3063                 perror("swap threads");
3064                 exit(EXIT_FAILURE);
3065         }
3066         return old->get_return_value();
3067 }
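/*
 * Usage sketch (assumption; the wrapper name is hypothetical and the real
 * wrappers live in the atomics/threading front end outside this file): a
 * user-visible atomic operation packages itself as a ModelAction, hands
 * control to the model checker via switch_to_master(), and receives the
 * value the checker chose for it.
 *
 *   uint64_t example_atomic_load(void *obj, std::memory_order order)
 *   {
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, order, obj));
 *   }
 */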
3068
3069 /**
3070  * Takes the next step in the execution, if possible.
3071  * @param curr The current step to take
3072  * @return Returns the next Thread to run, if any; NULL if this execution
3073  * should terminate
3074  */
3075 Thread * ModelChecker::take_step(ModelAction *curr)
3076 {
3077         Thread *curr_thrd = get_thread(curr);
3078         ASSERT(curr_thrd->get_state() == THREAD_READY);
3079
3080         curr = check_current_action(curr);
3081
3082         /* Infeasible -> don't take any more steps */
3083         if (is_infeasible())
3084                 return NULL;
3085         else if (isfeasibleprefix() && have_bug_reports()) {
3086                 set_assert();
3087                 return NULL;
3088         }
3089
3090         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3091                 return NULL;
3092
3093         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3094                 scheduler->remove_thread(curr_thrd);
3095
3096         Thread *next_thrd = NULL;
3097         if (curr)
3098                 next_thrd = action_select_next_thread(curr);
3099         if (!next_thrd)
3100                 next_thrd = get_next_thread();
3101
3102         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3103                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3104
3105         return next_thrd;
3106 }
3107
3108 /** Wrapper to run the user's main function, with appropriate arguments */
3109 void user_main_wrapper(void *)
3110 {
3111         user_main(model->params.argc, model->params.argv);
3112 }
3113
3114 /** @brief Run ModelChecker for the user program */
3115 void ModelChecker::run()
3116 {
3117         do {
3118                 thrd_t user_thread;
3119                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3120                 add_thread(t);
3121
3122                 do {
3123                         /*
3124                          * Stash next pending action(s) for thread(s). We
3125                          * should only need to stash one thread's action--the
3126                          * thread which just took a step--plus the first step
3127                          * for any newly-created thread
3128                          */
3129                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3130                                 thread_id_t tid = int_to_id(i);
3131                                 Thread *thr = get_thread(tid);
3132                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3133                                         switch_from_master(thr);
3134                                         if (thr->is_waiting_on(thr))
3135                                                 assert_bug("Deadlock detected");
3136                                 }
3137                         }
3138
3139                         /* Catch assertions from prior take_step or from
3140                          * between-ModelAction bugs (e.g., data races) */
3141                         if (has_asserted())
3142                                 break;
3143
3144                         /* Consume the next action for a Thread */
3145                         ModelAction *curr = t->get_pending();
3146                         t->set_pending(NULL);
3147                         t = take_step(curr);
3148                 } while (t && !t->is_model_thread());
3149
3150                 /*
3151                  * Launch end-of-execution release sequence fixups only when
3152                  * the execution is otherwise feasible AND there are:
3153                  *
3154                  * (1) pending release sequences
3155                  * (2) pending assertions that could be invalidated by a change
3156                  * in clock vectors (i.e., data races)
3157                  * (3) no pending promises
3158                  */
3159                 while (!pending_rel_seqs->empty() &&
3160                                 is_feasible_prefix_ignore_relseq() &&
3161                                 !unrealizedraces.empty()) {
3162                         model_print("*** WARNING: release sequence fixup action "
3163                                         "(%zu pending release sequence(s)) ***\n",
3164                                         pending_rel_seqs->size());
3165                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3166                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3167                                         model_thread);
3168                         take_step(fixup);
3169                 }
3170         } while (next_execution());
3171
3172         model_print("******* Model-checking complete: *******\n");
3173         print_stats();
3174 }