remove plugins
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 hard_failed_promise(false),
34                 too_many_reads(false),
35                 no_valid_reads(false),
36                 bad_synchronization(false),
37                 bad_sc_read(false),
38                 asserted(false)
39         { }
40
41         ~model_snapshot_members() {
42                 for (unsigned int i = 0; i < bugs.size(); i++)
43                         delete bugs[i];
44                 bugs.clear();
45         }
46
47         unsigned int next_thread_id;
48         modelclock_t used_sequence_numbers;
49         ModelAction *next_backtrack;
50         SnapVector<bug_message *> bugs;
51         bool failed_promise;
52         bool hard_failed_promise;
53         bool too_many_reads;
54         bool no_valid_reads;
55         /** @brief Incorrectly-ordered synchronization was made */
56         bool bad_synchronization;
57         bool bad_sc_read;
58         bool asserted;
59
60         SNAPSHOTALLOC
61 };
62
/** @brief Constructor
 *
 * @param m The owning ModelChecker (borrowed, not owned)
 * @param params Model-checking parameters (borrowed)
 * @param scheduler The scheduler used to pick the next thread (borrowed)
 * @param node_stack The exploration tree shared across executions (borrowed)
 *
 * Note: the member-initializer list must match the member declaration order
 * in execution.h; priv and mo_graph are heap-allocated here and released in
 * the destructor.
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());       // L: Create model thread
	add_thread(model_thread);                       // L: Add model thread to scheduler
	/* Give the scheduler and node stack a back-pointer to this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
91
92 /** @brief Destructor */
93 ModelExecution::~ModelExecution()
94 {
95         for (unsigned int i = 0; i < get_num_threads(); i++)
96                 delete get_thread(int_to_id(i));
97
98         for (unsigned int i = 0; i < promises.size(); i++)
99                 delete promises[i];
100
101         delete mo_graph;
102         delete priv;
103 }
104
105 int ModelExecution::get_execution_number() const
106 {
107         return model->get_execution_number();
108 }
109
110 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
111 {
112         action_list_t *tmp = hash->get(ptr);
113         if (tmp == NULL) {
114                 tmp = new action_list_t();
115                 hash->put(ptr, tmp);
116         }
117         return tmp;
118 }
119
120 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
121 {
122         SnapVector<action_list_t> *tmp = hash->get(ptr);
123         if (tmp == NULL) {
124                 tmp = new SnapVector<action_list_t>();
125                 hash->put(ptr, tmp);
126         }
127         return tmp;
128 }
129
130 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
131 {
132         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
133         if (wrv==NULL)
134                 return NULL;
135         unsigned int thread=id_to_int(tid);
136         if (thread < wrv->size())
137                 return &(*wrv)[thread];
138         else
139                 return NULL;
140 }
141
142 /** @return a thread ID for a new Thread */
143 thread_id_t ModelExecution::get_next_id()
144 {
145         return priv->next_thread_id++;
146 }
147
148 /** @return the number of user threads created during this execution */
149 unsigned int ModelExecution::get_num_threads() const
150 {
151         return priv->next_thread_id;
152 }
153
154 /** @return a sequence number for a new ModelAction */
155 modelclock_t ModelExecution::get_next_seq_num()
156 {
157         return ++priv->used_sequence_numbers;
158 }
159
160 /**
161  * @brief Should the current action wake up a given thread?
162  *
163  * @param curr The current action
164  * @param thread The thread that we might wake up
165  * @return True, if we should wake up the sleeping thread; false otherwise
166  */
167 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
168 {
169         const ModelAction *asleep = thread->get_pending();
170         /* Don't allow partial RMW to wake anyone up */
171         if (curr->is_rmwr())
172                 return false;
173         /* Synchronizing actions may have been backtracked */
174         if (asleep->could_synchronize_with(curr))
175                 return true;
176         /* All acquire/release fences and fence-acquire/store-release */
177         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
178                 return true;
179         /* Fence-release + store can awake load-acquire on the same location */
180         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
181                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
182                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
183                         return true;
184         }
185         return false;
186 }
187
188 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
189 {
190         for (unsigned int i = 0; i < get_num_threads(); i++) {
191                 Thread *thr = get_thread(int_to_id(i));
192                 if (scheduler->is_sleep_set(thr)) {
193                         if (should_wake_up(curr, thr))
194                                 /* Remove this thread from sleep set */
195                                 scheduler->remove_sleep(thr);
196                 }
197         }
198 }
199
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made (sets the snapshotted bad_synchronization flag) */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
206
/** @brief Alert the model-checker that a bad seq-cst read was made
 * (sets the snapshotted bad_sc_read flag).
 * NOTE(review): the previous comment here was copy-pasted from
 * set_bad_synchronization() and did not describe this function. */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
213
214 bool ModelExecution::assert_bug(const char *msg)
215 {
216         priv->bugs.push_back(new bug_message(msg));
217
218         if (isfeasibleprefix()) {
219                 set_assert();
220                 return true;
221         }
222         return false;
223 }
224
225 /** @return True, if any bugs have been reported for this execution */
226 bool ModelExecution::have_bug_reports() const
227 {
228         return priv->bugs.size() != 0;
229 }
230
/** @return the (still-owned) list of bug reports for this execution;
 * callers must not free the contained messages */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
235
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
246
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
256
257 /**
258  * Check if we are in a deadlock. Should only be called at the end of an
259  * execution, although it should not give false positives in the middle of an
260  * execution (there should be some ENABLED thread).
261  *
262  * @return True if program is in a deadlock; false otherwise
263  */
264 bool ModelExecution::is_deadlocked() const
265 {
266         bool blocking_threads = false;
267         for (unsigned int i = 0; i < get_num_threads(); i++) {
268                 thread_id_t tid = int_to_id(i);
269                 if (is_enabled(tid))
270                         return false;
271                 Thread *t = get_thread(tid);
272                 if (!t->is_model_thread() && t->get_pending())
273                         blocking_threads = true;
274         }
275         return blocking_threads;
276 }
277
278 /**
279  * @brief Check if we are yield-blocked
280  *
281  * A program can be "yield-blocked" if all threads are ready to execute a
282  * yield.
283  *
284  * @return True if the program is yield-blocked; false otherwise
285  */
286 bool ModelExecution::is_yieldblocked() const
287 {
288         if (!params->yieldblock)
289                 return false;
290
291         for (unsigned int i = 0; i < get_num_threads(); i++) {
292                 thread_id_t tid = int_to_id(i);
293                 Thread *t = get_thread(tid);
294                 if (t->get_pending() && t->get_pending()->is_yield())
295                         return true;
296         }
297         return false;
298 }
299
300 /**
301  * Check if this is a complete execution. That is, have all thread completed
302  * execution (rather than exiting because sleep sets have forced a redundant
303  * execution).
304  *
305  * @return True if the execution is complete.
306  */
307 bool ModelExecution::is_complete_execution() const
308 {
309         if (is_yieldblocked())
310                 return false;
311         for (unsigned int i = 0; i < get_num_threads(); i++)
312                 if (is_enabled(int_to_id(i)))
313                         return false;
314         return true;
315 }
316
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: walk the trace backwards until we reach it,
	 * then continue scanning from there */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Both vectors are indexed by thread id; entries start NULL */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions cannot form a backtracking pair with act */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				/* Plain load; may pair with a later-found fence-acquire */
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent fence-acquire per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Among threads that have both a fence-acquire and a prior load on this
	 * location, pick the latest fence (by sequence order) as the conflict */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
389
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primarily includes pairs
 * of synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* Intentional fall-through: seq-cst fences are handled like
		 * atomic accesses below */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return whichever
		 * conflict is later in the execution */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A conflicting lock operation is the backtracking point */
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* Another thread's failed trylock could succeed if reordered */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* Wait conflicts with another thread's failed trylock... */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			/* ...or with another thread's notify */
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* Notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	/* No conflict found for this action type */
	return NULL;
}
489
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	/* No conflicting prior action means nothing to backtrack against */
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	/* The backtracking decision is recorded at the node *before* prev */
	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at that node, only it needs to be tried;
	 * otherwise consider every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows: skip tid if some enabled thread holds
		 * priority and tid does not */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows: skip tid if some
		 * enabled thread has priority over it */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
573
574 /**
575  * @brief Cache the a backtracking point as the "most recent", if eligible
576  *
577  * Note that this does not prepare the NodeStack for this backtracking
578  * operation, it only caches the action on a per-execution basis
579  *
580  * @param act The operation at which we should explore a different next action
581  * (i.e., backtracking point)
582  * @return True, if this action is now the most recent backtracking point;
583  * false otherwise
584  */
585 bool ModelExecution::set_latest_backtrack(ModelAction *act)
586 {
587         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
588                 priv->next_backtrack = act;
589                 return true;
590         }
591         return false;
592 }
593
594 /**
595  * Returns last backtracking point. The model checker will explore a different
596  * path for this point in the next execution.
597  * @return The ModelAction at which the next execution should diverge.
598  */
599 ModelAction * ModelExecution::get_next_backtrack()
600 {
601         ModelAction *next = priv->next_backtrack;
602         priv->next_backtrack = NULL;
603         return next;
604 }
605
/**
 * Processes a read model action.
 *
 * Dispatches on the NodeStack's decision for where this read should read
 * from: a past write, an unresolved Promise, or a speculative future value.
 * The READ_FROM_PAST case may retry (via continue) with a different write
 * when the recency check fails and another candidate exists.
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loops only when READ_FROM_PAST rolls back and retries another write */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			/* Stage mo_graph edges so they can be rolled back on retry */
			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Another candidate write exists: undo and retry */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No alternatives left; flag the execution */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			/* add_reader failing marks the promise as failed */
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value: create a new Promise that some
			 * later write must resolve */
			struct future_value fv = node->get_future_value();
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Propagate the value read back to the user thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
669
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	/* NOTE(review): state stays NULL when curr has no mutex, yet it is
	 * dereferenced unconditionally below — presumably these action types
	 * always carry a mutex; confirm. */
	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			/* Failed trylock: report 0 to the caller and stop here */
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Accessing the lock before its allocation is a bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			/* Only wake threads blocked on *this* thread's lock */
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter on this location */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The node's misc counter selects which waiter to wake, so the
		 * model checker can explore every choice */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
765
766 /**
767  * @brief Check if the current pending promises allow a future value to be sent
768  *
769  * It is unsafe to pass a future value back if there exists a pending promise Pr
770  * such that:
771  *
772  *    reader --exec-> Pr --exec-> writer
773  *
774  * If such Pr exists, we must save the pending future value until Pr is
775  * resolved.
776  *
777  * @param writer The operation which sends the future value. Must be a write.
778  * @param reader The operation which will observe the value. Must be a read.
779  * @return True if the future value can be sent now; false if it must wait.
780  */
781 bool ModelExecution::promises_may_allow(const ModelAction *writer,
782                 const ModelAction *reader) const
783 {
784         for (int i = promises.size() - 1; i >= 0; i--) {
785                 ModelAction *pr = promises[i]->get_reader(0);
786                 //reader is after promise...doesn't cross any promise
787                 if (*reader > *pr)
788                         return true;
789                 //writer is after promise, reader before...bad...
790                 if (*writer > *pr)
791                         return false;
792         }
793         return true;
794 }
795
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader;
	 * walk up the parent chain until the thread id fits in the node */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Package value, expiration clock, and sending thread */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* A newly-recorded future value creates a backtracking point */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
826
/**
 * Process a write ModelAction
 *
 * Establishes modification-order constraints for the write, resolves at most
 * one pending Promise that this write can satisfy, and dispatches future
 * values to candidate readers (immediately when no pending promise is
 * crossed, otherwise deferred via the futurevalues list).
 *
 * @param curr The ModelAction to process
 * @param work The work queue, for adding fixup work
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* w_modification_order also populates send_fv with candidate readers */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise, work);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the blocking promise(s) resolve */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values; iterate in reverse so erase()
	 * does not disturb the indices not yet visited */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	/* Writes have no meaningful return value for the user thread */
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
879
880 /**
881  * Process a fence ModelAction
882  * @param curr The ModelAction to process
883  * @return True if synchronization was updated
884  */
885 bool ModelExecution::process_fence(ModelAction *curr)
886 {
887         /*
888          * fence-relaxed: no-op
889          * fence-release: only log the occurence (not in this function), for
890          *   use in later synchronization
891          * fence-acquire (this function): search for hypothetical release
892          *   sequences
893          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
894          */
895         bool updated = false;
896         if (curr->is_acquire()) {
897                 action_list_t *list = &action_trace;
898                 action_list_t::reverse_iterator rit;
899                 /* Find X : is_read(X) && X --sb-> curr */
900                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
901                         ModelAction *act = *rit;
902                         if (act == curr)
903                                 continue;
904                         if (act->get_tid() != curr->get_tid())
905                                 continue;
906                         /* Stop at the beginning of the thread */
907                         if (act->is_thread_start())
908                                 break;
909                         /* Stop once we reach a prior fence-acquire */
910                         if (act->is_fence() && act->is_acquire())
911                                 break;
912                         if (!act->is_read())
913                                 continue;
914                         /* read-acquire will find its own release sequences */
915                         if (act->is_acquire())
916                                 continue;
917
918                         /* Establish hypothetical release sequences */
919                         rel_heads_list_t release_heads;
920                         get_release_seq_heads(curr, act, &release_heads);
921                         for (unsigned int i = 0; i < release_heads.size(); i++)
922                                 synchronize(release_heads[i], curr);
923                         if (release_heads.size() != 0)
924                                 updated = true;
925                 }
926         }
927         return updated;
928 }
929
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc.  This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		thrd_t *thrd = (thrd_t *)curr->get_location();
		/* NOTE: this local shadows the ModelExecution::params member */
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			/* Inherit availability from the creating thread */
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		/* Join synchronizes with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(th->get_id()))
				/* Removing the last able thread fails the promise */
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* New thread: re-evaluate promise satisfiability */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
997
/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The Node records which (if any) write breaks this sequence */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* Pop the pending sequence we are resolving */
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;

		/* Propagate the changed clock vector */
		propagate_clockvector(acquire, work_queue);
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1052
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	/* Second part of an RMW: merge into the earlier RMWR action */
	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* explore_action() returns a prior action on replay, NULL if new */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* One backtracking choice per thread that could be notified */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1123
1124 /**
1125  * @brief Establish reads-from relation between two actions
1126  *
1127  * Perform basic operations involved with establishing a concrete rf relation,
1128  * including setting the ModelAction data and checking for release sequences.
1129  *
1130  * @param act The action that is reading (must be a read)
1131  * @param rf The action from which we are reading (must be a write)
1132  *
1133  * @return True if this read established synchronization
1134  */
1135 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1136 {
1137         ASSERT(rf);
1138         ASSERT(rf->is_write());
1139
1140         act->set_read_from(rf);
1141         if (act->is_acquire()) {
1142                 rel_heads_list_t release_heads;
1143                 get_release_seq_heads(act, act, &release_heads);
1144                 int num_heads = release_heads.size();
1145                 for (unsigned int i = 0; i < release_heads.size(); i++)
1146                         if (!synchronize(release_heads[i], act))
1147                                 num_heads--;
1148                 return num_heads > 0;
1149         }
1150         return false;
1151 }
1152
1153 /**
1154  * @brief Synchronizes two actions
1155  *
1156  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1157  * This function performs the synchronization as well as providing other hooks
1158  * for other checks along with synchronization.
1159  *
1160  * @param first The left-hand side of the synchronizes-with relation
1161  * @param second The right-hand side of the synchronizes-with relation
1162  * @return True if the synchronization was successful (i.e., was consistent
1163  * with the execution order); false otherwise
1164  */
1165 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1166 {
1167         if (*second < *first) {
1168                 set_bad_synchronization();
1169                 return false;
1170         }
1171         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1172         return second->synchronize_with(first);
1173 }
1174
/**
 * Check promises and eliminate potentially-satisfying threads when a thread is
 * blocked (e.g., join, lock). A thread which is waiting on another thread can
 * no longer satisfy a promise generated from that thread.
 *
 * @param blocker The thread on which a thread is waiting
 * @param waiting The waiting thread
 */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Skip promises that 'waiting' could not satisfy anyway */
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			/* Only promises read by the blocker are affected */
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once;
				 * a successful (non-failing) elimination ends
				 * the whole scan */
				return;
			}
		}
	}
}
1203
1204 /**
1205  * @brief Check whether a model action is enabled.
1206  *
1207  * Checks whether an operation would be successful (i.e., is a lock already
1208  * locked, or is the joined thread already complete).
1209  *
1210  * For yield-blocking, yields are never enabled.
1211  *
1212  * @param curr is the ModelAction to check whether it is enabled.
1213  * @return a bool that indicates whether the action is enabled.
1214  */
1215 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1216         if (curr->is_lock()) {
1217                 std::mutex *lock = curr->get_mutex();
1218                 struct std::mutex_state *state = lock->get_state();
1219                 if (state->locked)
1220                         return false;
1221         } else if (curr->is_thread_join()) {
1222                 Thread *blocking = curr->get_thread_operand();
1223                 if (!blocking->is_complete()) {
1224                         thread_blocking_check_promises(blocking, get_thread(curr));
1225                         return false;
1226                 }
1227         } else if (params->yieldblock && curr->is_yield()) {
1228                 return false;
1229         }
1230
1231         return true;
1232 }
1233
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May replace curr with an equivalent action from the NodeStack */
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the queue; processing one item may enqueue follow-up work */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			/* NOTE(review): thread/relseq processing below uses
			 * 'curr' while the other cases use 'act' — presumably
			 * they coincide for this entry type; confirm */
			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act, &work_queue))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
					if (act->is_seqcst()) {
						/* An SC read must not read from a write that
						 * happens-before the last SC write */
						ModelAction *last_sc_write = get_last_seq_cst_write(act);
						if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
							set_bad_sc_read();
						}
					}
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			/* New mo edges may complete pending release sequences */
			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1349
1350 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1351 {
1352         Node *currnode = curr->get_node();
1353         Node *parnode = currnode->get_parent();
1354
1355         if ((parnode && !parnode->backtrack_empty()) ||
1356                          !currnode->misc_empty() ||
1357                          !currnode->read_from_empty() ||
1358                          !currnode->promise_empty() ||
1359                          !currnode->relseq_break_empty()) {
1360                 set_latest_backtrack(curr);
1361         }
1362 }
1363
1364 bool ModelExecution::promises_expired() const
1365 {
1366         for (unsigned int i = 0; i < promises.size(); i++) {
1367                 Promise *promise = promises[i];
1368                 if (promise->get_expiration() < priv->used_sequence_numbers)
1369                         return true;
1370         }
1371         return false;
1372 }
1373
1374 /**
1375  * This is the strongest feasibility check available.
1376  * @return whether the current trace (partial or complete) must be a prefix of
1377  * a feasible trace.
1378  */
1379 bool ModelExecution::isfeasibleprefix() const
1380 {
1381         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1382 }
1383
1384 /**
1385  * Print disagnostic information about an infeasible execution
1386  * @param prefix A string to prefix the output with; if NULL, then a default
1387  * message prefix will be provided
1388  */
1389 void ModelExecution::print_infeasibility(const char *prefix) const
1390 {
1391         char buf[100];
1392         char *ptr = buf;
1393         if (mo_graph->checkForCycles())
1394                 ptr += sprintf(ptr, "[mo cycle]");
1395         if (priv->failed_promise || priv->hard_failed_promise)
1396                 ptr += sprintf(ptr, "[failed promise]");
1397         if (priv->too_many_reads)
1398                 ptr += sprintf(ptr, "[too many reads]");
1399         if (priv->no_valid_reads)
1400                 ptr += sprintf(ptr, "[no valid reads-from]");
1401         if (priv->bad_synchronization)
1402                 ptr += sprintf(ptr, "[bad sw ordering]");
1403         if (priv->bad_sc_read)
1404                 ptr += sprintf(ptr, "[bad sc read]");
1405         if (promises_expired())
1406                 ptr += sprintf(ptr, "[promise expired]");
1407         if (promises.size() != 0)
1408                 ptr += sprintf(ptr, "[unresolved promise]");
1409         if (ptr != buf)
1410                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1411 }
1412
1413 /**
1414  * Returns whether the current completed trace is feasible, except for pending
1415  * release sequences.
1416  */
1417 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1418 {
1419         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1420
1421 }
1422
1423 /**
1424  * Check if the current partial trace is infeasible. Does not check any
1425  * end-of-execution flags, which might rule out the execution. Thus, this is
1426  * useful only for ruling an execution as infeasible.
1427  * @return whether the current partial trace is infeasible.
1428  */
1429 bool ModelExecution::is_infeasible() const
1430 {
1431         return mo_graph->checkForCycles() ||
1432                 priv->no_valid_reads ||
1433                 priv->too_many_reads ||
1434                 priv->bad_synchronization ||
1435                 priv->bad_sc_read ||
1436                 priv->hard_failed_promise ||
1437                 promises_expired();
1438 }
1439
1440 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1441 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1442         ModelAction *lastread = get_last_action(act->get_tid());
1443         lastread->process_rmw(act);
1444         if (act->is_rmw()) {
1445                 if (lastread->get_reads_from())
1446                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1447                 else
1448                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1449                 mo_graph->commitChanges();
1450         }
1451         return lastread;
1452 }
1453
/**
 * A helper function for ModelExecution::check_recency, to check if the current
 * thread is able to read from a different write/promise for 'params.maxreads'
 * number of steps and if that write/promise should become visible (i.e., is
 * ordered later in the modification order). This helps model memory liveness.
 *
 * @param curr The current action. Must be a read.
 * @param rf The write/promise from which we plan to read
 * @param other_rf The write/promise from which we may read
 * @return True if we were able to read from other_rf for params.maxreads steps
 */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone?
	 * NOTE(review): no rend() check here — presumably the caller
	 * (check_recency) has already verified the list holds at least
	 * maxreads prior reads; confirm before reusing elsewhere */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1491
/**
 * Checks whether a thread has read from the same write or Promise for too many
 * times without seeing the effects of a later write/Promise.
 *
 * Basic idea:
 * 1) there must a different write/promise that we could read from,
 * 2) we must have read from the same write/promise in excess of maxreads times,
 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
 *
 * If so, we decide that the execution is no longer feasible.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction/Promise from which we might read.
 * @return True if the read should succeed; false otherwise
 */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* Liveness checking disabled */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; bail out (read
	 * succeeds) as soon as any prior action breaks the streak */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		/* A read from a different promise/write ends the streak */
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		/* A read with no alternative choices cannot be "stuck" */
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* The streak is long enough: fail if any alternative write/promise
	 * would have been readable the whole time */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1556
1557 /**
1558  * @brief Updates the mo_graph with the constraints imposed from the current
1559  * read.
1560  *
1561  * Basic idea is the following: Go through each other thread and find
1562  * the last action that happened before our read.  Two cases:
1563  *
1564  * -# The action is a write: that write must either occur before
1565  * the write we read from or be the write we read from.
1566  * -# The action is a read: the write that that action read from
1567  * must occur before the write we read from or be the same write.
1568  *
1569  * @param curr The current action. Must be a read.
1570  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1571  * @return True if modification order edges were added; false otherwise
1572  */
1573 template <typename rf_type>
1574 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1575 {
1576         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1577         unsigned int i;
1578         bool added = false;
1579         ASSERT(curr->is_read());
1580
1581         /* Last SC fence in the current thread */
1582         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1583         ModelAction *last_sc_write = NULL;
1584         if (curr->is_seqcst())
1585                 last_sc_write = get_last_seq_cst_write(curr);
1586
1587         /* Iterate over all threads */
1588         for (i = 0; i < thrd_lists->size(); i++) {
1589                 /* Last SC fence in thread i */
1590                 ModelAction *last_sc_fence_thread_local = NULL;
1591                 if (int_to_id((int)i) != curr->get_tid())
1592                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1593
1594                 /* Last SC fence in thread i, before last SC fence in current thread */
1595                 ModelAction *last_sc_fence_thread_before = NULL;
1596                 if (last_sc_fence_local)
1597                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1598
1599                 /* Iterate over actions in thread, starting from most recent */
1600                 action_list_t *list = &(*thrd_lists)[i];
1601                 action_list_t::reverse_iterator rit;
1602                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1603                         ModelAction *act = *rit;
1604
1605                         /* Skip curr */
1606                         if (act == curr)
1607                                 continue;
1608                         /* Don't want to add reflexive edges on 'rf' */
1609                         if (act->equals(rf)) {
1610                                 if (act->happens_before(curr))
1611                                         break;
1612                                 else
1613                                         continue;
1614                         }
1615
1616                         if (act->is_write()) {
1617                                 /* C++, Section 29.3 statement 5 */
1618                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1619                                                 *act < *last_sc_fence_thread_local) {
1620                                         added = mo_graph->addEdge(act, rf) || added;
1621                                         break;
1622                                 }
1623                                 /* C++, Section 29.3 statement 4 */
1624                                 else if (act->is_seqcst() && last_sc_fence_local &&
1625                                                 *act < *last_sc_fence_local) {
1626                                         added = mo_graph->addEdge(act, rf) || added;
1627                                         break;
1628                                 }
1629                                 /* C++, Section 29.3 statement 6 */
1630                                 else if (last_sc_fence_thread_before &&
1631                                                 *act < *last_sc_fence_thread_before) {
1632                                         added = mo_graph->addEdge(act, rf) || added;
1633                                         break;
1634                                 }
1635                         }
1636
1637                         /*
1638                          * Include at most one act per-thread that "happens
1639                          * before" curr
1640                          */
1641                         if (act->happens_before(curr)) {
1642                                 if (act->is_write()) {
1643                                         added = mo_graph->addEdge(act, rf) || added;
1644                                 } else {
1645                                         const ModelAction *prevrf = act->get_reads_from();
1646                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1647                                         if (prevrf) {
1648                                                 if (!prevrf->equals(rf))
1649                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1650                                         } else if (!prevrf_promise->equals(rf)) {
1651                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1652                                         }
1653                                 }
1654                                 break;
1655                         }
1656                 }
1657         }
1658
1659         /*
1660          * All compatible, thread-exclusive promises must be ordered after any
1661          * concrete loads from the same thread
1662          */
1663         for (unsigned int i = 0; i < promises.size(); i++)
1664                 if (promises[i]->is_compatible_exclusive(curr))
1665                         added = mo_graph->addEdge(rf, promises[i]) || added;
1666
1667         return added;
1668 }
1669
1670 /**
1671  * Updates the mo_graph with the constraints imposed from the current write.
1672  *
1673  * Basic idea is the following: Go through each other thread and find
1674  * the lastest action that happened before our write.  Two cases:
1675  *
1676  * (1) The action is a write => that write must occur before
1677  * the current write
1678  *
1679  * (2) The action is a read => the write that that action read from
1680  * must occur before the current write.
1681  *
1682  * This method also handles two other issues:
1683  *
1684  * (I) Sequential Consistency: Making sure that if the current write is
1685  * seq_cst, that it occurs after the previous seq_cst write.
1686  *
1687  * (II) Sending the write back to non-synchronizing reads.
1688  *
1689  * @param curr The current action. Must be a write.
1690  * @param send_fv A vector for stashing reads to which we may pass our future
1691  * value. If NULL, then don't record any future values.
1692  * @return True if modification order edges were added; false otherwise
1693  */
1694 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1695 {
1696         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1697         unsigned int i;
1698         bool added = false;
1699         ASSERT(curr->is_write());
1700
1701         if (curr->is_seqcst()) {
1702                 /* We have to at least see the last sequentially consistent write,
1703                          so we are initialized. */
1704                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1705                 if (last_seq_cst != NULL) {
1706                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1707                 }
1708         }
1709
1710         /* Last SC fence in the current thread */
1711         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1712
1713         /* Iterate over all threads */
1714         for (i = 0; i < thrd_lists->size(); i++) {
1715                 /* Last SC fence in thread i, before last SC fence in current thread */
1716                 ModelAction *last_sc_fence_thread_before = NULL;
1717                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1718                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1719
1720                 /* Iterate over actions in thread, starting from most recent */
1721                 action_list_t *list = &(*thrd_lists)[i];
1722                 action_list_t::reverse_iterator rit;
1723                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1724                         ModelAction *act = *rit;
1725                         if (act == curr) {
1726                                 /*
1727                                  * 1) If RMW and it actually read from something, then we
1728                                  * already have all relevant edges, so just skip to next
1729                                  * thread.
1730                                  *
1731                                  * 2) If RMW and it didn't read from anything, we should
1732                                  * whatever edge we can get to speed up convergence.
1733                                  *
1734                                  * 3) If normal write, we need to look at earlier actions, so
1735                                  * continue processing list.
1736                                  */
1737                                 if (curr->is_rmw()) {
1738                                         if (curr->get_reads_from() != NULL)
1739                                                 break;
1740                                         else
1741                                                 continue;
1742                                 } else
1743                                         continue;
1744                         }
1745
1746                         /* C++, Section 29.3 statement 7 */
1747                         if (last_sc_fence_thread_before && act->is_write() &&
1748                                         *act < *last_sc_fence_thread_before) {
1749                                 added = mo_graph->addEdge(act, curr) || added;
1750                                 break;
1751                         }
1752
1753                         /*
1754                          * Include at most one act per-thread that "happens
1755                          * before" curr
1756                          */
1757                         if (act->happens_before(curr)) {
1758                                 /*
1759                                  * Note: if act is RMW, just add edge:
1760                                  *   act --mo--> curr
1761                                  * The following edge should be handled elsewhere:
1762                                  *   readfrom(act) --mo--> act
1763                                  */
1764                                 if (act->is_write())
1765                                         added = mo_graph->addEdge(act, curr) || added;
1766                                 else if (act->is_read()) {
1767                                         //if previous read accessed a null, just keep going
1768                                         if (act->get_reads_from() == NULL) {
1769                                                 added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
1770                                         } else
1771                                                 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1772                                 }
1773                                 break;
1774                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1775                                                      !act->same_thread(curr)) {
1776                                 /* We have an action that:
1777                                    (1) did not happen before us
1778                                    (2) is a read and we are a write
1779                                    (3) cannot synchronize with us
1780                                    (4) is in a different thread
1781                                    =>
1782                                    that read could potentially read from our write.  Note that
1783                                    these checks are overly conservative at this point, we'll
1784                                    do more checks before actually removing the
1785                                    pendingfuturevalue.
1786
1787                                  */
1788
1789                                 if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
1790                                         if (!is_infeasible())
1791                                                 send_fv->push_back(act);
1792                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1793                                                 add_future_value(curr, act);
1794                                 }
1795                         }
1796                 }
1797         }
1798
1799         /*
1800          * All compatible, thread-exclusive promises must be ordered after any
1801          * concrete stores to the same thread, or else they can be merged with
1802          * this store later
1803          */
1804         for (unsigned int i = 0; i < promises.size(); i++)
1805                 if (promises[i]->is_compatible_exclusive(curr))
1806                         added = mo_graph->addEdge(curr, promises[i]) || added;
1807
1808         return added;
1809 }
1810
//This procedure uses coherence to prune future values that are
//guaranteed to generate a coherence violation.
//
//need to see if there is (1) a promise for thread write, (2)
//the promise is sb before write, (3) the promise can only be
//resolved by the thread read, and (4) the promise has same
//location as read/write
//
//Returns false (prune the future value) when such a promise is found.

bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises newest-first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* (4) same location as the write */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						/* NOTE(review): this walks up the node stack
						 * (parent chain) from 'read' toward 'prreader'
						 * — confirm it always terminates at prreader */
						bool okay=false;
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					return false;
				}
			}
		}
	}
	return true;
}
1853
1854
1855 /** Arbitrary reads from the future are not allowed.  Section 29.3
1856  * part 9 places some constraints.  This method checks one result of constraint
1857  * constraint.  Others require compiler support. */
1858 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1859 {
1860         if (!writer->is_rmw())
1861                 return true;
1862
1863         if (!reader->is_rmw())
1864                 return true;
1865
1866         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1867                 if (search == reader)
1868                         return false;
1869                 if (search->get_tid() == reader->get_tid() &&
1870                                 search->happens_before(reader))
1871                         break;
1872         }
1873
1874         return true;
1875 }
1876
1877 /**
1878  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1879  * some constraints. This method checks one the following constraint (others
1880  * require compiler support):
1881  *
1882  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1883  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1884  */
1885 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1886 {
1887         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1888         unsigned int i;
1889         /* Iterate over all threads */
1890         for (i = 0; i < thrd_lists->size(); i++) {
1891                 const ModelAction *write_after_read = NULL;
1892
1893                 /* Iterate over actions in thread, starting from most recent */
1894                 action_list_t *list = &(*thrd_lists)[i];
1895                 action_list_t::reverse_iterator rit;
1896                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1897                         ModelAction *act = *rit;
1898
1899                         /* Don't disallow due to act == reader */
1900                         if (!reader->happens_before(act) || reader == act)
1901                                 break;
1902                         else if (act->is_write())
1903                                 write_after_read = act;
1904                         else if (act->is_read() && act->get_reads_from() != NULL)
1905                                 write_after_read = act->get_reads_from();
1906                 }
1907
1908                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1909                         return false;
1910         }
1911         return true;
1912 }
1913
1914 /**
1915  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1916  * The ModelAction under consideration is expected to be taking part in
1917  * release/acquire synchronization as an object of the "reads from" relation.
1918  * Note that this can only provide release sequence support for RMW chains
1919  * which do not read from the future, as those actions cannot be traced until
1920  * their "promise" is fulfilled. Similarly, we may not even establish the
1921  * presence of a release sequence with certainty, as some modification order
1922  * constraints may be decided further in the future. Thus, this function
1923  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1924  * and a boolean representing certainty.
1925  *
1926  * @param rf The action that might be part of a release sequence. Must be a
1927  * write.
1928  * @param release_heads A pass-by-reference style return parameter. After
1929  * execution of this function, release_heads will contain the heads of all the
1930  * relevant release sequences, if any exists with certainty
1931  * @param pending A pass-by-reference style return parameter which is only used
1932  * when returning false (i.e., uncertain). Returns most information regarding
1933  * an uncertain release sequence, including any write operations that might
1934  * break the sequence.
1935  * @return true, if the ModelExecution is certain that release_heads is complete;
1936  * false otherwise
1937  */
1938 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1939                 rel_heads_list_t *release_heads,
1940                 struct release_seq *pending) const
1941 {
1942         /* Only check for release sequences if there are no cycles */
1943         if (mo_graph->checkForCycles())
1944                 return false;
1945
1946         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1947                 ASSERT(rf->is_write());
1948
1949                 if (rf->is_release())
1950                         release_heads->push_back(rf);
1951                 else if (rf->get_last_fence_release())
1952                         release_heads->push_back(rf->get_last_fence_release());
1953                 if (!rf->is_rmw())
1954                         break; /* End of RMW chain */
1955
1956                 /** @todo Need to be smarter here...  In the linux lock
1957                  * example, this will run to the beginning of the program for
1958                  * every acquire. */
1959                 /** @todo The way to be smarter here is to keep going until 1
1960                  * thread has a release preceded by an acquire and you've seen
1961                  *       both. */
1962
1963                 /* acq_rel RMW is a sufficient stopping condition */
1964                 if (rf->is_acquire() && rf->is_release())
1965                         return true; /* complete */
1966         };
1967         if (!rf) {
1968                 /* read from future: need to settle this later */
1969                 pending->rf = NULL;
1970                 return false; /* incomplete */
1971         }
1972
1973         if (rf->is_release())
1974                 return true; /* complete */
1975
1976         /* else relaxed write
1977          * - check for fence-release in the same thread (29.8, stmt. 3)
1978          * - check modification order for contiguous subsequence
1979          *   -> rf must be same thread as release */
1980
1981         const ModelAction *fence_release = rf->get_last_fence_release();
1982         /* Synchronize with a fence-release unconditionally; we don't need to
1983          * find any more "contiguous subsequence..." for it */
1984         if (fence_release)
1985                 release_heads->push_back(fence_release);
1986
1987         int tid = id_to_int(rf->get_tid());
1988         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1989         action_list_t *list = &(*thrd_lists)[tid];
1990         action_list_t::const_reverse_iterator rit;
1991
1992         /* Find rf in the thread list */
1993         rit = std::find(list->rbegin(), list->rend(), rf);
1994         ASSERT(rit != list->rend());
1995
1996         /* Find the last {write,fence}-release */
1997         for (; rit != list->rend(); rit++) {
1998                 if (fence_release && *(*rit) < *fence_release)
1999                         break;
2000                 if ((*rit)->is_release())
2001                         break;
2002         }
2003         if (rit == list->rend()) {
2004                 /* No write-release in this thread */
2005                 return true; /* complete */
2006         } else if (fence_release && *(*rit) < *fence_release) {
2007                 /* The fence-release is more recent (and so, "stronger") than
2008                  * the most recent write-release */
2009                 return true; /* complete */
2010         } /* else, need to establish contiguous release sequence */
2011         ModelAction *release = *rit;
2012
2013         ASSERT(rf->same_thread(release));
2014
2015         pending->writes.clear();
2016
2017         bool certain = true;
2018         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2019                 if (id_to_int(rf->get_tid()) == (int)i)
2020                         continue;
2021                 list = &(*thrd_lists)[i];
2022
2023                 /* Can we ensure no future writes from this thread may break
2024                  * the release seq? */
2025                 bool future_ordered = false;
2026
2027                 ModelAction *last = get_last_action(int_to_id(i));
2028                 Thread *th = get_thread(int_to_id(i));
2029                 if ((last && rf->happens_before(last)) ||
2030                                 !is_enabled(th) ||
2031                                 th->is_complete())
2032                         future_ordered = true;
2033
2034                 ASSERT(!th->is_model_thread() || future_ordered);
2035
2036                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2037                         const ModelAction *act = *rit;
2038                         /* Reach synchronization -> this thread is complete */
2039                         if (act->happens_before(release))
2040                                 break;
2041                         if (rf->happens_before(act)) {
2042                                 future_ordered = true;
2043                                 continue;
2044                         }
2045
2046                         /* Only non-RMW writes can break release sequences */
2047                         if (!act->is_write() || act->is_rmw())
2048                                 continue;
2049
2050                         /* Check modification order */
2051                         if (mo_graph->checkReachable(rf, act)) {
2052                                 /* rf --mo--> act */
2053                                 future_ordered = true;
2054                                 continue;
2055                         }
2056                         if (mo_graph->checkReachable(act, release))
2057                                 /* act --mo--> release */
2058                                 break;
2059                         if (mo_graph->checkReachable(release, act) &&
2060                                       mo_graph->checkReachable(act, rf)) {
2061                                 /* release --mo-> act --mo--> rf */
2062                                 return true; /* complete */
2063                         }
2064                         /* act may break release sequence */
2065                         pending->writes.push_back(act);
2066                         certain = false;
2067                 }
2068                 if (!future_ordered)
2069                         certain = false; /* This thread is uncertain */
2070         }
2071
2072         if (certain) {
2073                 release_heads->push_back(release);
2074                 pending->writes.clear();
2075         } else {
2076                 pending->release = release;
2077                 pending->rf = rf;
2078         }
2079         return certain;
2080 }
2081
2082 /**
2083  * An interface for getting the release sequence head(s) with which a
2084  * given ModelAction must synchronize. This function only returns a non-empty
2085  * result when it can locate a release sequence head with certainty. Otherwise,
2086  * it may mark the internal state of the ModelExecution so that it will handle
2087  * the release sequence at a later time, causing @a acquire to update its
2088  * synchronization at some later point in execution.
2089  *
2090  * @param acquire The 'acquire' action that may synchronize with a release
2091  * sequence
2092  * @param read The read action that may read from a release sequence; this may
2093  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2094  * when 'acquire' is a fence-acquire)
2095  * @param release_heads A pass-by-reference return parameter. Will be filled
2096  * with the head(s) of the release sequence(s), if they exists with certainty.
2097  * @see ModelExecution::release_seq_heads
2098  */
2099 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2100                 ModelAction *read, rel_heads_list_t *release_heads)
2101 {
2102         const ModelAction *rf = read->get_reads_from();
2103         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2104         sequence->acquire = acquire;
2105         sequence->read = read;
2106
2107         if (!release_seq_heads(rf, release_heads, sequence)) {
2108                 /* add act to 'lazy checking' list */
2109                 pending_rel_seqs.push_back(sequence);
2110         } else {
2111                 snapshot_free(sequence);
2112         }
2113 }
2114
2115 /**
2116  * @brief Propagate a modified clock vector to actions later in the execution
2117  * order
2118  *
2119  * After an acquire operation lazily completes a release-sequence
2120  * synchronization, we must update all clock vectors for operations later than
2121  * the acquire in the execution order.
2122  *
2123  * @param acquire The ModelAction whose clock vector must be propagated
2124  * @param work The work queue to which we can add work items, if this
2125  * propagation triggers more updates (e.g., to the modification order)
2126  */
2127 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2128 {
2129         /* Re-check all pending release sequences */
2130         work->push_back(CheckRelSeqWorkEntry(NULL));
2131         /* Re-check read-acquire for mo_graph edges */
2132         work->push_back(MOEdgeWorkEntry(acquire));
2133
2134         /* propagate synchronization to later actions */
2135         action_list_t::reverse_iterator rit = action_trace.rbegin();
2136         for (; (*rit) != acquire; rit++) {
2137                 ModelAction *propagate = *rit;
2138                 if (acquire->happens_before(propagate)) {
2139                         synchronize(acquire, propagate);
2140                         /* Re-check 'propagate' for mo_graph edges */
2141                         work->push_back(MOEdgeWorkEntry(propagate));
2142                 }
2143         }
2144 }
2145
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Recompute the release heads with the mo info now available */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with any newly-certain release heads */
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		/* NOTE(review): 'updated' accumulates across iterations, so this
		 * propagation also fires for a later acquire whose own sequence
		 * added nothing new -- presumably harmless extra work; confirm */
		if (updated) {
			/* Propagate the changed clock vector */
			propagate_clockvector(acquire, work_queue);
		}
		if (complete) {
			/* Fully determined: remove from the pending list.
			 * erase() returns the next valid iterator. */
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2200
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	/* Per-location list of all actions touching this object */
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First atomic access to this location: seed the lists with an
	 * uninitialized-value placeholder so reads have a candidate store */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	/* Global trace; the UNINIT action goes before everything else */
	action_trace.push_back(act);
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-location, per-thread lists */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Track each thread's most recent action.
	 * NOTE(review): resizes to get_num_threads() while the vector above
	 * uses priv->next_thread_id -- confirm the two always agree */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Track the most recent release fence per thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	/* A wait also operates on its mutex; index it under the mutex
	 * location too (the wait's value field holds the mutex address,
	 * per this usage -- confirm against the wait implementation) */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2254
2255 /**
2256  * @brief Get the last action performed by a particular Thread
2257  * @param tid The thread ID of the Thread in question
2258  * @return The last action in the thread
2259  */
2260 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2261 {
2262         int threadid = id_to_int(tid);
2263         if (threadid < (int)thrd_last_action.size())
2264                 return thrd_last_action[id_to_int(tid)];
2265         else
2266                 return NULL;
2267 }
2268
2269 /**
2270  * @brief Get the last fence release performed by a particular Thread
2271  * @param tid The thread ID of the Thread in question
2272  * @return The last fence release in the thread, if one exists; NULL otherwise
2273  */
2274 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2275 {
2276         int threadid = id_to_int(tid);
2277         if (threadid < (int)thrd_last_fence_release.size())
2278                 return thrd_last_fence_release[id_to_int(tid)];
2279         else
2280                 return NULL;
2281 }
2282
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last seq_cst write, or NULL if none precedes curr
 */
ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
{
	void *location = curr->get_location();
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	/* NOTE(review): the first loop assumes 'curr' is already present in
	 * the per-location list (see add_action_to_lists); if it were absent,
	 * the loop would run past rend() -- confirm all callers add first */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); (*rit) != curr; rit++)
		;
	rit++; /* Skip past curr */
	for ( ; rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}
2305
2306 /**
2307  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2308  * performed in a particular thread, prior to a particular fence.
2309  * @param tid The ID of the thread to check
2310  * @param before_fence The fence from which to begin the search; if NULL, then
2311  * search for the most recent fence in the thread.
2312  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2313  */
2314 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2315 {
2316         /* All fences should have location FENCE_LOCATION */
2317         action_list_t *list = obj_map.get(FENCE_LOCATION);
2318
2319         if (!list)
2320                 return NULL;
2321
2322         action_list_t::reverse_iterator rit = list->rbegin();
2323
2324         if (before_fence) {
2325                 for (; rit != list->rend(); rit++)
2326                         if (*rit == before_fence)
2327                                 break;
2328
2329                 ASSERT(*rit == before_fence);
2330                 rit++;
2331         }
2332
2333         for (; rit != list->rend(); rit++)
2334                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2335                         return *rit;
2336         return NULL;
2337 }
2338
2339 /**
2340  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2341  * location). This function identifies the mutex according to the current
2342  * action, which is presumed to perform on the same mutex.
2343  * @param curr The current ModelAction; also denotes the object location to
2344  * check
2345  * @return The last unlock operation
2346  */
2347 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2348 {
2349         void *location = curr->get_location();
2350         action_list_t *list = obj_map.get(location);
2351         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2352         action_list_t::reverse_iterator rit;
2353         for (rit = list->rbegin(); rit != list->rend(); rit++)
2354                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2355                         return *rit;
2356         return NULL;
2357 }
2358
2359 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2360 {
2361         ModelAction *parent = get_last_action(tid);
2362         if (!parent)
2363                 parent = get_thread(tid)->get_creation();
2364         return parent;
2365 }
2366
2367 /**
2368  * Returns the clock vector for a given thread.
2369  * @param tid The thread whose clock vector we want
2370  * @return Desired clock vector
2371  */
2372 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2373 {
2374         return get_parent_action(tid)->get_cv();
2375 }
2376
2377 /**
2378  * @brief Find the promise (if any) to resolve for the current action and
2379  * remove it from the pending promise vector
2380  * @param curr The current ModelAction. Should be a write.
2381  * @return The Promise to resolve, if any; otherwise NULL
2382  */
2383 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2384 {
2385         for (unsigned int i = 0; i < promises.size(); i++)
2386                 if (curr->get_node()->get_promise(i)) {
2387                         Promise *ret = promises[i];
2388                         promises.erase(promises.begin() + i);
2389                         return ret;
2390                 }
2391         return NULL;
2392 }
2393
/**
 * Resolve a Promise with a current write.
 * @param write The ModelAction that is fulfilling Promises
 * @param promise The Promise to resolve
 * @param work The work queue, for adding new fixup work
 * @return True if the Promise was successfully resolved; false otherwise
 */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
		work_queue_t *work)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Point every reader of this promise at the concrete write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		}
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	/* Substitute the write for the promise in the mo_graph; a failure
	 * here means the graph cannot be made consistent */
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	/* NOTE(review): always returns true even when resolvePromise failed;
	 * the failure is reported via priv->hard_failed_promise instead */
	return true;
}
2439
2440 /**
2441  * Compute the set of promises that could potentially be satisfied by this
2442  * action. Note that the set computation actually appears in the Node, not in
2443  * ModelExecution.
2444  * @param curr The ModelAction that may satisfy promises
2445  */
2446 void ModelExecution::compute_promises(ModelAction *curr)
2447 {
2448         for (unsigned int i = 0; i < promises.size(); i++) {
2449                 Promise *promise = promises[i];
2450                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2451                         continue;
2452
2453                 bool satisfy = true;
2454                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2455                         const ModelAction *act = promise->get_reader(j);
2456                         if (act->happens_before(curr) ||
2457                                         act->could_synchronize_with(curr)) {
2458                                 satisfy = false;
2459                                 break;
2460                         }
2461                 }
2462                 if (satisfy)
2463                         curr->get_node()->set_promise(i);
2464         }
2465 }
2466
2467 /** Checks promises in response to change in ClockVector Threads. */
2468 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2469 {
2470         for (unsigned int i = 0; i < promises.size(); i++) {
2471                 Promise *promise = promises[i];
2472                 if (!promise->thread_is_available(tid))
2473                         continue;
2474                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2475                         const ModelAction *act = promise->get_reader(j);
2476                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2477                                         merge_cv->synchronized_since(act)) {
2478                                 if (promise->eliminate_thread(tid)) {
2479                                         /* Promise has failed */
2480                                         priv->failed_promise = true;
2481                                         return;
2482                                 }
2483                         }
2484                 }
2485         }
2486 }
2487
2488 void ModelExecution::check_promises_thread_disabled()
2489 {
2490         for (unsigned int i = 0; i < promises.size(); i++) {
2491                 Promise *promise = promises[i];
2492                 if (promise->has_failed()) {
2493                         priv->failed_promise = true;
2494                         return;
2495                 }
2496         }
2497 }
2498
/**
 * @brief Checks promises in response to addition to modification order for
 * threads.
 *
 * We test whether threads are still available for satisfying promises after an
 * addition to our modification order constraints. Those that are unavailable
 * are "eliminated". Once all threads are eliminated from satisfying a promise,
 * that promise has failed.
 *
 * @param act The ModelAction which updated the modification order
 * @param is_read_check Should be true if act is a read and we must check for
 * updates to the store from which it read (there is a distinction here for
 * RMW's, which are both a load and a store)
 */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read check, the relevant write is the store act read from */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* If any reader of the promise happens-before act, check the
		 * promise once against this write; one reader suffices, hence
		 * the break after the first happens-before match */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
			       continue;
			if (mo_graph->checkPromise(write, promise)) {
				/* Promise can no longer be satisfied at all */
				priv->hard_failed_promise = true;
				return;
			}
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* If the promise is mo-reachable from this write, re-check it */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
		}
	}
}
2547
2548 /**
2549  * Compute the set of writes that may break the current pending release
2550  * sequence. This information is extracted from previou release sequence
2551  * calculations.
2552  *
2553  * @param curr The current ModelAction. Must be a release sequence fixup
2554  * action.
2555  */
2556 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2557 {
2558         if (pending_rel_seqs.empty())
2559                 return;
2560
2561         struct release_seq *pending = pending_rel_seqs.back();
2562         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2563                 const ModelAction *write = pending->writes[i];
2564                 curr->get_node()->add_relseq_break(write);
2565         }
2566
2567         /* NULL means don't break the sequence; just synchronize */
2568         curr->get_node()->add_relseq_break(NULL);
2569 }
2570
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* A seq_cst read is constrained by the most recent seq_cst write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			/* Seq_cst reads may not read writes hidden behind the
			 * last seq_cst write; sleep-set reads are further
			 * restricted by sleep_can_read_from() */
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: trial-add the mo
				 * edges, test feasibility, then roll back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2652
2653 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2654 {
2655         for ( ; write != NULL; write = write->get_reads_from()) {
2656                 /* UNINIT actions don't have a Node, and they never sleep */
2657                 if (write->is_uninitialized())
2658                         return true;
2659                 Node *prevnode = write->get_node()->get_parent();
2660
2661                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2662                 if (write->is_release() && thread_sleep)
2663                         return true;
2664                 if (!write->is_rmw())
2665                         return false;
2666         }
2667         return true;
2668 }
2669
2670 /**
2671  * @brief Get an action representing an uninitialized atomic
2672  *
2673  * This function may create a new one or try to retrieve one from the NodeStack
2674  *
2675  * @param curr The current action, which prompts the creation of an UNINIT action
2676  * @return A pointer to the UNINIT ModelAction
2677  */
2678 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2679 {
2680         Node *node = curr->get_node();
2681         ModelAction *act = node->get_uninit_action();
2682         if (!act) {
2683                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2684                 node->set_uninit_action(act);
2685         }
2686         act->create_cv(NULL);
2687         return act;
2688 }
2689
2690 static void print_list(const action_list_t *list)
2691 {
2692         action_list_t::const_iterator it;
2693
2694         model_print("------------------------------------------------------------------------------------\n");
2695         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2696         model_print("------------------------------------------------------------------------------------\n");
2697
2698         unsigned int hash = 0;
2699
2700         for (it = list->begin(); it != list->end(); it++) {
2701                 const ModelAction *act = *it;
2702                 if (act->get_seq_number() > 0)
2703                         act->print();
2704                 hash = hash^(hash<<3)^((*it)->hash());
2705         }
2706         model_print("HASH %u\n", hash);
2707         model_print("------------------------------------------------------------------------------------\n");
2708 }
2709
2710 #if SUPPORT_MOD_ORDER_DUMP
2711 void ModelExecution::dumpGraph(char *filename) const
2712 {
2713         char buffer[200];
2714         sprintf(buffer, "%s.dot", filename);
2715         FILE *file = fopen(buffer, "w");
2716         fprintf(file, "digraph %s {\n", filename);
2717         mo_graph->dumpNodes(file);
2718         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2719
2720         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2721                 ModelAction *act = *it;
2722                 if (act->is_read()) {
2723                         mo_graph->dot_print_node(file, act);
2724                         if (act->get_reads_from())
2725                                 mo_graph->dot_print_edge(file,
2726                                                 act->get_reads_from(),
2727                                                 act,
2728                                                 "label=\"rf\", color=red, weight=2");
2729                         else
2730                                 mo_graph->dot_print_edge(file,
2731                                                 act->get_reads_from_promise(),
2732                                                 act,
2733                                                 "label=\"rf\", color=red");
2734                 }
2735                 if (thread_array[act->get_tid()]) {
2736                         mo_graph->dot_print_edge(file,
2737                                         thread_array[id_to_int(act->get_tid())],
2738                                         act,
2739                                         "label=\"sb\", color=blue, weight=400");
2740                 }
2741
2742                 thread_array[act->get_tid()] = act;
2743         }
2744         fprintf(file, "}\n");
2745         model_free(thread_array);
2746         fclose(file);
2747 }
2748 #endif
2749
2750 /** @brief Prints an execution trace summary. */
2751 void ModelExecution::print_summary() const
2752 {
2753 #if SUPPORT_MOD_ORDER_DUMP
2754         char buffername[100];
2755         sprintf(buffername, "exec%04u", get_execution_number());
2756         mo_graph->dumpGraphToFile(buffername);
2757         sprintf(buffername, "graph%04u", get_execution_number());
2758         dumpGraph(buffername);
2759 #endif
2760
2761         model_print("Execution trace %d:", get_execution_number());
2762         if (isfeasibleprefix()) {
2763                 if (is_yieldblocked())
2764                         model_print(" YIELD BLOCKED");
2765                 if (scheduler->all_threads_sleeping())
2766                         model_print(" SLEEP-SET REDUNDANT");
2767                 if (have_bug_reports())
2768                         model_print(" DETECTED BUG(S)");
2769         } else
2770                 print_infeasibility(" INFEASIBLE");
2771         model_print("\n");
2772
2773         print_list(&action_trace);
2774         model_print("\n");
2775
2776         if (!promises.empty()) {
2777                 model_print("Pending promises:\n");
2778                 for (unsigned int i = 0; i < promises.size(); i++) {
2779                         model_print(" [P%u] ", i);
2780                         promises[i]->print();
2781                 }
2782                 model_print("\n");
2783         }
2784 }
2785
2786 /**
2787  * Add a Thread to the system for the first time. Should only be called once
2788  * per thread.
2789  * @param t The Thread to add
2790  */
2791 void ModelExecution::add_thread(Thread *t)
2792 {
2793         unsigned int i = id_to_int(t->get_id());
2794         if (i >= thread_map.size())
2795                 thread_map.resize(i + 1);
2796         thread_map[i] = t;
2797         if (!t->is_model_thread())
2798                 scheduler->add_thread(t);
2799 }
2800
2801 /**
2802  * @brief Get a Thread reference by its ID
2803  * @param tid The Thread's ID
2804  * @return A Thread reference
2805  */
2806 Thread * ModelExecution::get_thread(thread_id_t tid) const
2807 {
2808         unsigned int i = id_to_int(tid);
2809         if (i < thread_map.size())
2810                 return thread_map[i];
2811         return NULL;
2812 }
2813
2814 /**
2815  * @brief Get a reference to the Thread in which a ModelAction was executed
2816  * @param act The ModelAction
2817  * @return A Thread reference
2818  */
2819 Thread * ModelExecution::get_thread(const ModelAction *act) const
2820 {
2821         return get_thread(act->get_tid());
2822 }
2823
2824 /**
2825  * @brief Get a Promise's "promise number"
2826  *
2827  * A "promise number" is an index number that is unique to a promise, valid
2828  * only for a specific snapshot of an execution trace. Promises may come and go
2829  * as they are generated an resolved, so an index only retains meaning for the
2830  * current snapshot.
2831  *
2832  * @param promise The Promise to check
2833  * @return The promise index, if the promise still is valid; otherwise -1
2834  */
2835 int ModelExecution::get_promise_number(const Promise *promise) const
2836 {
2837         for (unsigned int i = 0; i < promises.size(); i++)
2838                 if (promises[i] == promise)
2839                         return i;
2840         /* Not found */
2841         return -1;
2842 }
2843
2844 /**
2845  * @brief Check if a Thread is currently enabled
2846  * @param t The Thread to check
2847  * @return True if the Thread is currently enabled
2848  */
2849 bool ModelExecution::is_enabled(Thread *t) const
2850 {
2851         return scheduler->is_enabled(t);
2852 }
2853
2854 /**
2855  * @brief Check if a Thread is currently enabled
2856  * @param tid The ID of the Thread to check
2857  * @return True if the Thread is currently enabled
2858  */
2859 bool ModelExecution::is_enabled(thread_id_t tid) const
2860 {
2861         return scheduler->is_enabled(tid);
2862 }
2863
2864 /**
2865  * @brief Select the next thread to execute based on the curren action
2866  *
2867  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2868  * actions should be followed by the execution of their child thread. In either
2869  * case, the current action should determine the next thread schedule.
2870  *
2871  * @param curr The current action
2872  * @return The next thread to run, if the current action will determine this
2873  * selection; otherwise NULL
2874  */
2875 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2876 {
2877         /* Do not split atomic RMW */
2878         if (curr->is_rmwr())
2879                 return get_thread(curr);
2880         /* Follow CREATE with the created thread */
2881         if (curr->get_type() == THREAD_CREATE)
2882                 return curr->get_thread_operand();
2883         return NULL;
2884 }
2885
2886 /** @return True if the execution has taken too many steps */
2887 bool ModelExecution::too_many_steps() const
2888 {
2889         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2890 }
2891
2892 /**
2893  * Takes the next step in the execution, if possible.
2894  * @param curr The current step to take
2895  * @return Returns the next Thread to run, if any; NULL if this execution
2896  * should terminate
2897  */
2898 Thread * ModelExecution::take_step(ModelAction *curr)
2899 {
2900         Thread *curr_thrd = get_thread(curr);
2901         ASSERT(curr_thrd->get_state() == THREAD_READY);
2902
2903         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2904         curr = check_current_action(curr);
2905         ASSERT(curr);
2906
2907         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2908                 scheduler->remove_thread(curr_thrd);
2909
2910         return action_select_next_thread(curr);
2911 }
2912
2913 /**
2914  * Launch end-of-execution release sequence fixups only when
2915  * the execution is otherwise feasible AND there are:
2916  *
2917  * (1) pending release sequences
2918  * (2) pending assertions that could be invalidated by a change
2919  * in clock vectors (i.e., data races)
2920  * (3) no pending promises
2921  */
2922 void ModelExecution::fixup_release_sequences()
2923 {
2924         while (!pending_rel_seqs.empty() &&
2925                         is_feasible_prefix_ignore_relseq() &&
2926                         haveUnrealizedRaces()) {
2927                 model_print("*** WARNING: release sequence fixup action "
2928                                 "(%zu pending release seuqence(s)) ***\n",
2929                                 pending_rel_seqs.size());
2930                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2931                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2932                                 model_thread);
2933                 take_step(fixup);
2934         };
2935 }