execution: improve yield-blocking structure
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* This struct owns the bug_message objects it collects */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to hand out to the next created thread (see get_next_id()) */
	unsigned int next_thread_id;
	/** @brief Highest sequence number used so far (see get_next_seq_num()) */
	modelclock_t used_sequence_numbers;
	/** @brief Most recent backtracking point, cached by set_latest_backtrack() */
	ModelAction *next_backtrack;
	/** @brief Bug reports collected during this execution (owned) */
	SnapVector<bug_message *> bugs;
	/** @brief A promise could not be kept during this execution */
	bool failed_promise;
	/** @brief A read exceeded the recency bound (see process_read()) */
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief Execution has triggered an assertion and should halt */
	bool asserted;

	SNAPSHOTALLOC
};
58
/** @brief Constructor */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	add_thread(model_thread);
	/* Let the scheduler and node stack call back into this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
87
88 /** @brief Destructor */
89 ModelExecution::~ModelExecution()
90 {
91         for (unsigned int i = 0; i < get_num_threads(); i++)
92                 delete get_thread(int_to_id(i));
93
94         for (unsigned int i = 0; i < promises.size(); i++)
95                 delete promises[i];
96
97         delete mo_graph;
98         delete priv;
99 }
100
/** @return the current execution number, as tracked by the owning ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
105
106 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
107 {
108         action_list_t *tmp = hash->get(ptr);
109         if (tmp == NULL) {
110                 tmp = new action_list_t();
111                 hash->put(ptr, tmp);
112         }
113         return tmp;
114 }
115
116 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
117 {
118         SnapVector<action_list_t> *tmp = hash->get(ptr);
119         if (tmp == NULL) {
120                 tmp = new SnapVector<action_list_t>();
121                 hash->put(ptr, tmp);
122         }
123         return tmp;
124 }
125
126 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
127 {
128         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
129         if (wrv==NULL)
130                 return NULL;
131         unsigned int thread=id_to_int(tid);
132         if (thread < wrv->size())
133                 return &(*wrv)[thread];
134         else
135                 return NULL;
136 }
137
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
	/* Post-increment: hand out the current id, then advance the counter */
	return priv->next_thread_id++;
}
143
/** @return the number of user threads created during this execution */
unsigned int ModelExecution::get_num_threads() const
{
	/* next_thread_id doubles as a count, since ids are handed out sequentially */
	return priv->next_thread_id;
}
149
/** @return a sequence number for a new ModelAction */
modelclock_t ModelExecution::get_next_seq_num()
{
	/* Pre-increment: sequence numbers start at 1, 0 is never used */
	return ++priv->used_sequence_numbers;
}
155
/**
 * @brief Should the current action wake up a given thread?
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
	/* The action the sleeping thread is blocked on */
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	if (curr->is_rmwr())
		return false;
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
		return true;
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
		return true;
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		/* Wake only if the sleeper's last action precedes the fence-release */
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
			return true;
	}
	return false;
}
183
184 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
185 {
186         for (unsigned int i = 0; i < get_num_threads(); i++) {
187                 Thread *thr = get_thread(int_to_id(i));
188                 if (scheduler->is_sleep_set(thr)) {
189                         if (should_wake_up(curr, thr))
190                                 /* Remove this thread from sleep set */
191                                 scheduler->remove_sleep(thr);
192                 }
193         }
194 }
195
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	/* Sticky flag; snapshotted along with the rest of priv */
	priv->bad_synchronization = true;
}
202
203 bool ModelExecution::assert_bug(const char *msg)
204 {
205         priv->bugs.push_back(new bug_message(msg));
206
207         if (isfeasibleprefix()) {
208                 set_assert();
209                 return true;
210         }
211         return false;
212 }
213
214 /** @return True, if any bugs have been reported for this execution */
215 bool ModelExecution::have_bug_reports() const
216 {
217         return priv->bugs.size() != 0;
218 }
219
/** @return the list of bug reports collected so far (owned by priv) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
224
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
235
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
245
246 /**
247  * Check if we are in a deadlock. Should only be called at the end of an
248  * execution, although it should not give false positives in the middle of an
249  * execution (there should be some ENABLED thread).
250  *
251  * @return True if program is in a deadlock; false otherwise
252  */
253 bool ModelExecution::is_deadlocked() const
254 {
255         bool blocking_threads = false;
256         for (unsigned int i = 0; i < get_num_threads(); i++) {
257                 thread_id_t tid = int_to_id(i);
258                 if (is_enabled(tid))
259                         return false;
260                 Thread *t = get_thread(tid);
261                 if (!t->is_model_thread() && t->get_pending())
262                         blocking_threads = true;
263         }
264         return blocking_threads;
265 }
266
267 bool ModelExecution::is_yieldblocked() const
268 {
269         if (!params->yieldblock)
270                 return false;
271
272         for (unsigned int i = 0; i < get_num_threads(); i++) {
273                 thread_id_t tid = int_to_id(i);
274                 Thread *t = get_thread(tid);
275                 if (t->get_pending() && t->get_pending()->is_yield())
276                         return true;
277         }
278         return false;
279 }
280
281 /**
282  * Check if this is a complete execution. That is, have all thread completed
283  * execution (rather than exiting because sleep sets have forced a redundant
284  * execution).
285  *
286  * @return True if the execution is complete.
287  */
288 bool ModelExecution::is_complete_execution() const
289 {
290         if (is_yieldblocked())
291                 return false;
292         for (unsigned int i = 0; i < get_num_threads(); i++)
293                 if (is_enabled(int_to_id(i)))
294                         return false;
295         return true;
296 }
297
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	/* The release must appear in the trace */
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* One slot per thread; NULL means "none found yet" */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	/* Continue scanning backwards from the release */
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions cannot form a backtracking pair */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Of all threads with both a fence and a prior load, pick the latest fence */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
370
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* fall through: seq-cst fence is treated like an atomic access */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-related conflicts; return the later of the two */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
470
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param the ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	/* The most recent action that conflicts with act (if any) */
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point, only that thread
	 * needs to be considered; otherwise consider all threads */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			/* Skip tid if some other enabled thread has priority */
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			/* Skip tid if some enabled thread has priority over it */
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
554
555 /**
556  * @brief Cache the a backtracking point as the "most recent", if eligible
557  *
558  * Note that this does not prepare the NodeStack for this backtracking
559  * operation, it only caches the action on a per-execution basis
560  *
561  * @param act The operation at which we should explore a different next action
562  * (i.e., backtracking point)
563  * @return True, if this action is now the most recent backtracking point;
564  * false otherwise
565  */
566 bool ModelExecution::set_latest_backtrack(ModelAction *act)
567 {
568         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
569                 priv->next_backtrack = act;
570                 return true;
571         }
572         return false;
573 }
574
575 /**
576  * Returns last backtracking point. The model checker will explore a different
577  * path for this point in the next execution.
578  * @return The ModelAction at which the next execution should diverge.
579  */
580 ModelAction * ModelExecution::get_next_backtrack()
581 {
582         ModelAction *next = priv->next_backtrack;
583         priv->next_backtrack = NULL;
584         return next;
585 }
586
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loop so a failed recency check can retry with a different read-from */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			/* Read from a store that already appears in the trace */
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			/* Speculative mo_graph changes; rolled back if we retry */
			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Another candidate exists: undo and retry */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No alternatives left; flag and proceed anyway */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			/* Read from a value promised by a future store */
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a new Promise for the yet-unseen store */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Deliver the read value to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
650
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	/* NOTE(review): state stays NULL when mutex is NULL, yet the lock/
	 * trylock cases dereference it unconditionally — presumably those
	 * action types always carry a mutex; confirm against callers */
	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			/* Failed trylock returns 0 and does not take the lock */
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Detect use of the mutex before its initialization */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter at this location */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The node's misc counter selects which waiter to wake */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
746
/**
 * @brief Check if the current pending promises allow a future value to be sent
 *
 * If one of the following is true:
 *  (a) there are no pending promises
 *  (b) the reader and writer do not cross any promises
 * Then, it is safe to pass a future value back now.
 *
 * Otherwise, we must save the pending future value until (a) or (b) is true
 *
 * @param writer The operation which sends the future value. Must be a write.
 * @param reader The operation which will observe the value. Must be a read.
 * @return True if the future value can be sent now; false if it must wait.
 */
bool ModelExecution::promises_may_allow(const ModelAction *writer,
		const ModelAction *reader) const
{
	if (promises.empty())
		return true;
	/* Scan promises from newest to oldest; order of the two checks matters */
	for (int i = promises.size() - 1; i >= 0; i--) {
		ModelAction *pr = promises[i]->get_reader(0);
		//reader is after promise...doesn't cross any promise
		if (*reader > *pr)
			return true;
		//writer is after promise, reader before...bad...
		if (*writer > *pr)
			return false;
	}
	return true;
}
777
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Package up the value, an expiration clock, and the sending thread */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* A newly-recorded future value becomes a backtracking candidate */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
808
809 /**
810  * Process a write ModelAction
811  * @param curr The ModelAction to process
812  * @return True if the mo_graph was updated or promises were resolved
813  */
814 bool ModelExecution::process_write(ModelAction *curr)
815 {
816         /* Readers to which we may send our future value */
817         ModelVector<ModelAction *> send_fv;
818
819         const ModelAction *earliest_promise_reader;
820         bool updated_promises = false;
821
822         bool updated_mod_order = w_modification_order(curr, &send_fv);
823         Promise *promise = pop_promise_to_resolve(curr);
824
825         if (promise) {
826                 earliest_promise_reader = promise->get_reader(0);
827                 updated_promises = resolve_promise(curr, promise);
828         } else
829                 earliest_promise_reader = NULL;
830
831         for (unsigned int i = 0; i < send_fv.size(); i++) {
832                 ModelAction *read = send_fv[i];
833
834                 /* Don't send future values to reads after the Promise we resolve */
835                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
836                         /* Check if future value can be sent immediately */
837                         if (promises_may_allow(curr, read)) {
838                                 add_future_value(curr, read);
839                         } else {
840                                 futurevalues.push_back(PendingFutureValue(curr, read));
841                         }
842                 }
843         }
844
845         /* Check the pending future values */
846         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
847                 struct PendingFutureValue pfv = futurevalues[i];
848                 if (promises_may_allow(pfv.writer, pfv.reader)) {
849                         add_future_value(pfv.writer, pfv.reader);
850                         futurevalues.erase(futurevalues.begin() + i);
851                 }
852         }
853
854         mo_graph->commitChanges();
855         mo_check_promises(curr, false);
856
857         get_thread(curr)->set_return_value(VALUE_NONE);
858         return updated_mod_order || updated_promises;
859 }
860
861 /**
862  * Process a fence ModelAction
863  * @param curr The ModelAction to process
864  * @return True if synchronization was updated
865  */
866 bool ModelExecution::process_fence(ModelAction *curr)
867 {
868         /*
869          * fence-relaxed: no-op
870          * fence-release: only log the occurence (not in this function), for
871          *   use in later synchronization
872          * fence-acquire (this function): search for hypothetical release
873          *   sequences
874          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
875          */
876         bool updated = false;
877         if (curr->is_acquire()) {
878                 action_list_t *list = &action_trace;
879                 action_list_t::reverse_iterator rit;
880                 /* Find X : is_read(X) && X --sb-> curr */
881                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
882                         ModelAction *act = *rit;
883                         if (act == curr)
884                                 continue;
885                         if (act->get_tid() != curr->get_tid())
886                                 continue;
887                         /* Stop at the beginning of the thread */
888                         if (act->is_thread_start())
889                                 break;
890                         /* Stop once we reach a prior fence-acquire */
891                         if (act->is_fence() && act->is_acquire())
892                                 break;
893                         if (!act->is_read())
894                                 continue;
895                         /* read-acquire will find its own release sequences */
896                         if (act->is_acquire())
897                                 continue;
898
899                         /* Establish hypothetical release sequences */
900                         rel_heads_list_t release_heads;
901                         get_release_seq_heads(curr, act, &release_heads);
902                         for (unsigned int i = 0; i < release_heads.size(); i++)
903                                 synchronize(release_heads[i], curr);
904                         if (release_heads.size() != 0)
905                                 updated = true;
906                 }
907         }
908         return updated;
909 }
910
911 /**
912  * @brief Process the current action for thread-related activity
913  *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
916  * synchronization, etc.  This function is a no-op for non-THREAD actions
917  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
918  *
919  * @param curr The current action
920  * @return True if synchronization was updated or a thread completed
921  */
922 bool ModelExecution::process_thread_action(ModelAction *curr)
923 {
924         bool updated = false;
925
926         switch (curr->get_type()) {
927         case THREAD_CREATE: {
928                 thrd_t *thrd = (thrd_t *)curr->get_location();
929                 struct thread_params *params = (struct thread_params *)curr->get_value();
930                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
931                 add_thread(th);
932                 th->set_creation(curr);
933                 /* Promises can be satisfied by children */
934                 for (unsigned int i = 0; i < promises.size(); i++) {
935                         Promise *promise = promises[i];
936                         if (promise->thread_is_available(curr->get_tid()))
937                                 promise->add_thread(th->get_id());
938                 }
939                 break;
940         }
941         case THREAD_JOIN: {
942                 Thread *blocking = curr->get_thread_operand();
943                 ModelAction *act = get_last_action(blocking->get_id());
944                 synchronize(act, curr);
945                 updated = true; /* trigger rel-seq checks */
946                 break;
947         }
948         case THREAD_FINISH: {
949                 Thread *th = get_thread(curr);
950                 /* Wake up any joining threads */
951                 for (unsigned int i = 0; i < get_num_threads(); i++) {
952                         Thread *waiting = get_thread(int_to_id(i));
953                         if (waiting->waiting_on() == th &&
954                                         waiting->get_pending()->is_thread_join())
955                                 scheduler->wake(waiting);
956                 }
957                 th->complete();
958                 /* Completed thread can't satisfy promises */
959                 for (unsigned int i = 0; i < promises.size(); i++) {
960                         Promise *promise = promises[i];
961                         if (promise->thread_is_available(th->get_id()))
962                                 if (promise->eliminate_thread(th->get_id()))
963                                         priv->failed_promise = true;
964                 }
965                 updated = true; /* trigger rel-seq checks */
966                 break;
967         }
968         case THREAD_START: {
969                 check_promises(curr->get_tid(), NULL, curr->get_cv());
970                 break;
971         }
972         default:
973                 break;
974         }
975
976         return updated;
977 }
978
979 /**
980  * @brief Process the current action for release sequence fixup activity
981  *
982  * Performs model-checker release sequence fixups for the current action,
983  * forcing a single pending release sequence to break (with a given, potential
984  * "loose" write) or to complete (i.e., synchronize). If a pending release
985  * sequence forms a complete release sequence, then we must perform the fixup
986  * synchronization, mo_graph additions, etc.
987  *
988  * @param curr The current action; must be a release sequence fixup action
989  * @param work_queue The work queue to which to add work items as they are
990  * generated
991  */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The Node records which write (if any) should break this sequence;
	 * NULL means the sequence must instead complete (synchronize) */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* Consume the most recent pending release sequence */
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	/* The release head and the read-from write belong to the same thread */
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize; bail if the sw edge is inconsistent */
		if (!synchronize(release, acquire))
			return;
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check act for mo_graph edges */
		work_queue->push_back(MOEdgeWorkEntry(acquire));

		/* propagate synchronization to later actions: walk backward
		 * from the end of the trace until we reach 'acquire' */
		action_list_t::reverse_iterator rit = action_trace.rbegin();
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				synchronize(acquire, propagate);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));
			}
		}
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1045
1046 /**
1047  * Initialize the current action by performing one or more of the following
1048  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1049  * in the NodeStack, manipulating backtracking sets, allocating and
1050  * initializing clock vectors, and computing the promises to fulfill.
1051  *
1052  * @param curr The current action, as passed from the user context; may be
1053  * freed/invalidated after the execution of this function, with a different
1054  * action "returned" its place (pass-by-reference)
1055  * @return True if curr is a newly-explored action; false otherwise
1056  */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	/* The 2nd/3rd part of an RMW is merged into the earlier RMWR action;
	 * the caller's action object is consumed (deleted) here */
	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL return means the NodeStack already holds an equivalent
	 * action from a previous exploration of this step */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* notify_one may wake any one of the current waiters */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1116
1117 /**
1118  * @brief Establish reads-from relation between two actions
1119  *
1120  * Perform basic operations involved with establishing a concrete rf relation,
1121  * including setting the ModelAction data and checking for release sequences.
1122  *
1123  * @param act The action that is reading (must be a read)
1124  * @param rf The action from which we are reading (must be a write)
1125  *
1126  * @return True if this read established synchronization
1127  */
1128 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1129 {
1130         ASSERT(rf);
1131         ASSERT(rf->is_write());
1132
1133         act->set_read_from(rf);
1134         if (act->is_acquire()) {
1135                 rel_heads_list_t release_heads;
1136                 get_release_seq_heads(act, act, &release_heads);
1137                 int num_heads = release_heads.size();
1138                 for (unsigned int i = 0; i < release_heads.size(); i++)
1139                         if (!synchronize(release_heads[i], act))
1140                                 num_heads--;
1141                 return num_heads > 0;
1142         }
1143         return false;
1144 }
1145
1146 /**
1147  * @brief Synchronizes two actions
1148  *
1149  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1150  * This function performs the synchronization as well as providing other hooks
1151  * for other checks along with synchronization.
1152  *
1153  * @param first The left-hand side of the synchronizes-with relation
1154  * @param second The right-hand side of the synchronizes-with relation
1155  * @return True if the synchronization was successful (i.e., was consistent
1156  * with the execution order); false otherwise
1157  */
1158 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1159 {
1160         if (*second < *first) {
1161                 set_bad_synchronization();
1162                 return false;
1163         }
1164         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1165         return second->synchronize_with(first);
1166 }
1167
1168 /**
1169  * Check promises and eliminate potentially-satisfying threads when a thread is
1170  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1171  * no longer satisfy a promise generated from that thread.
1172  *
1173  * @param blocker The thread on which a thread is waiting
1174  * @param waiting The waiting thread
1175  */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Skip promises 'waiting' could never satisfy anyway */
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			/* Only promises read by the blocker are affected */
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed: no thread can satisfy it now */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}
1196
1197 /**
1198  * @brief Check whether a model action is enabled.
1199  *
1200  * Checks whether a lock or join operation would be successful (i.e., is the
1201  * lock already locked, or is the joined thread already complete). If not, put
1202  * the action in a waiter list.
1203  *
1204  * @param curr is the ModelAction to check whether it is enabled.
1205  * @return a bool that indicates whether the action is enabled.
1206  */
1207 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1208         if (curr->is_lock()) {
1209                 std::mutex *lock = curr->get_mutex();
1210                 struct std::mutex_state *state = lock->get_state();
1211                 if (state->locked)
1212                         return false;
1213         } else if (curr->is_thread_join()) {
1214                 Thread *blocking = curr->get_thread_operand();
1215                 if (!blocking->is_complete()) {
1216                         thread_blocking_check_promises(blocking, get_thread(curr));
1217                         return false;
1218                 }
1219         } else if (params->yieldblock && curr->is_yield()) {
1220                 return false;
1221         }
1222
1223         return true;
1224 }
1225
1226 /**
1227  * This is the heart of the model checker routine. It performs model-checking
1228  * actions corresponding to a given "current action." Among other processes, it
1229  * calculates reads-from relationships, updates synchronization clock vectors,
1230  * forms a memory_order constraints graph, and handles replay/backtrack
1231  * execution when running permutations of previously-observed executions.
1232  *
1233  * @param curr The current action to process
1234  * @return The ModelAction that is actually executed; may be different than
1235  * curr
1236  */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	/* Must record this before initialize_curr_action may merge/replace curr */
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the queue; entries may push further entries */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			/* NOTE(review): passes 'curr', not 'act', here and for the
			 * relseq fixup below — presumably act == curr for this work
			 * type; confirm before changing */
			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				/* A read takes its value from either a write or a Promise */
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1335
1336 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1337 {
1338         Node *currnode = curr->get_node();
1339         Node *parnode = currnode->get_parent();
1340
1341         if ((parnode && !parnode->backtrack_empty()) ||
1342                          !currnode->misc_empty() ||
1343                          !currnode->read_from_empty() ||
1344                          !currnode->promise_empty() ||
1345                          !currnode->relseq_break_empty()) {
1346                 set_latest_backtrack(curr);
1347         }
1348 }
1349
1350 bool ModelExecution::promises_expired() const
1351 {
1352         for (unsigned int i = 0; i < promises.size(); i++) {
1353                 Promise *promise = promises[i];
1354                 if (promise->get_expiration() < priv->used_sequence_numbers)
1355                         return true;
1356         }
1357         return false;
1358 }
1359
1360 /**
1361  * This is the strongest feasibility check available.
1362  * @return whether the current trace (partial or complete) must be a prefix of
1363  * a feasible trace.
1364  */
1365 bool ModelExecution::isfeasibleprefix() const
1366 {
1367         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1368 }
1369
1370 /**
 * Print diagnostic information about an infeasible execution
1372  * @param prefix A string to prefix the output with; if NULL, then a default
1373  * message prefix will be provided
1374  */
1375 void ModelExecution::print_infeasibility(const char *prefix) const
1376 {
1377         char buf[100];
1378         char *ptr = buf;
1379         if (mo_graph->checkForCycles())
1380                 ptr += sprintf(ptr, "[mo cycle]");
1381         if (priv->failed_promise)
1382                 ptr += sprintf(ptr, "[failed promise]");
1383         if (priv->too_many_reads)
1384                 ptr += sprintf(ptr, "[too many reads]");
1385         if (priv->no_valid_reads)
1386                 ptr += sprintf(ptr, "[no valid reads-from]");
1387         if (priv->bad_synchronization)
1388                 ptr += sprintf(ptr, "[bad sw ordering]");
1389         if (promises_expired())
1390                 ptr += sprintf(ptr, "[promise expired]");
1391         if (promises.size() != 0)
1392                 ptr += sprintf(ptr, "[unresolved promise]");
1393         if (ptr != buf)
1394                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1395 }
1396
1397 /**
1398  * Returns whether the current completed trace is feasible, except for pending
1399  * release sequences.
1400  */
1401 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1402 {
1403         return !is_infeasible() && promises.size() == 0;
1404 }
1405
1406 /**
1407  * Check if the current partial trace is infeasible. Does not check any
1408  * end-of-execution flags, which might rule out the execution. Thus, this is
1409  * useful only for ruling an execution as infeasible.
1410  * @return whether the current partial trace is infeasible.
1411  */
1412 bool ModelExecution::is_infeasible() const
1413 {
1414         return mo_graph->checkForCycles() ||
1415                 priv->no_valid_reads ||
1416                 priv->failed_promise ||
1417                 priv->too_many_reads ||
1418                 priv->bad_synchronization ||
1419                 promises_expired();
1420 }
1421
1422 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1423 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1424         ModelAction *lastread = get_last_action(act->get_tid());
1425         lastread->process_rmw(act);
1426         if (act->is_rmw()) {
1427                 if (lastread->get_reads_from())
1428                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1429                 else
1430                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1431                 mo_graph->commitChanges();
1432         }
1433         return lastread;
1434 }
1435
1436 /**
1437  * A helper function for ModelExecution::check_recency, to check if the current
1438  * thread is able to read from a different write/promise for 'params.maxreads'
1439  * number of steps and if that write/promise should become visible (i.e., is
1440  * ordered later in the modification order). This helps model memory liveness.
1441  *
1442  * @param curr The current action. Must be a read.
1443  * @param rf The write/promise from which we plan to read
1444  * @param other_rf The write/promise from which we may read
1445  * @return True if we were able to read from other_rf for params.maxreads steps
1446  */
1447 template <typename T, typename U>
1448 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1449 {
1450         /* Need a different write/promise */
1451         if (other_rf->equals(rf))
1452                 return false;
1453
1454         /* Only look for "newer" writes/promises */
1455         if (!mo_graph->checkReachable(rf, other_rf))
1456                 return false;
1457
1458         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1459         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1460         action_list_t::reverse_iterator rit = list->rbegin();
1461         ASSERT((*rit) == curr);
1462         /* Skip past curr */
1463         rit++;
1464
1465         /* Does this write/promise work for everyone? */
1466         for (int i = 0; i < params->maxreads; i++, rit++) {
1467                 ModelAction *act = *rit;
1468                 if (!act->may_read_from(other_rf))
1469                         return false;
1470         }
1471         return true;
1472 }
1473
1474 /**
1475  * Checks whether a thread has read from the same write or Promise for too many
1476  * times without seeing the effects of a later write/Promise.
1477  *
1478  * Basic idea:
1479  * 1) there must a different write/promise that we could read from,
1480  * 2) we must have read from the same write/promise in excess of maxreads times,
1481  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1482  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1483  *
1484  * If so, we decide that the execution is no longer feasible.
1485  *
1486  * @param curr The current action. Must be a read.
1487  * @param rf The ModelAction/Promise from which we might read.
1488  * @return True if the read should succeed; false otherwise
1489  */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* Feature disabled when maxreads is 0 */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; any early return
	 * here means the "too many reads" condition cannot hold. This scan
	 * also guarantees should_read_instead() can safely walk maxreads
	 * entries without hitting rend(). */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		/* A prior read from a different promise/write breaks the streak */
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* Look for a newer write/promise every prior read could have seen */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1538
/**
 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read.  Two cases:
 *
 * -# The action is a write: that write must either occur before
 * the write we read from or be the write we read from.
 * -# The action is a read: the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
template <typename rf_type>
bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	ModelAction *last_sc_write = NULL;
	/* Only an SC read is ordered w.r.t. the last SC write (29.3 stmt 3) */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Skip curr */
			if (act == curr)
				continue;
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				/* If rf itself happens before curr, everything
				 * earlier in this thread is already ordered through
				 * rf, so stop scanning this thread */
				if (act->happens_before(curr))
					break;
				else
					continue;
			}

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/* C++, Section 29.3 statement 3 (second subpoint) */
			if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
				added = mo_graph->addEdge(act, rf) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					added = mo_graph->addEdge(act, rf) || added;
				} else {
					/* act is a read: order its own reads-from source
					 * before rf instead.
					 * NOTE(review): when prevrf is NULL this assumes
					 * the read always has a reads-from promise —
					 * confirm that invariant; otherwise the equals()
					 * call below dereferences NULL */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, promises[i]) || added;

	return added;
}
1657
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @param send_fv A vector for stashing reads to which we may pass our future
 * value. If NULL, then don't record any future values.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * add whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
						continue;
					added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */
				if (send_fv && thin_air_constraint_may_allow(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1796
1797 /** Arbitrary reads from the future are not allowed.  Section 29.3
1798  * part 9 places some constraints.  This method checks one result of constraint
1799  * constraint.  Others require compiler support. */
1800 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1801 {
1802         if (!writer->is_rmw())
1803                 return true;
1804
1805         if (!reader->is_rmw())
1806                 return true;
1807
1808         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1809                 if (search == reader)
1810                         return false;
1811                 if (search->get_tid() == reader->get_tid() &&
1812                                 search->happens_before(reader))
1813                         break;
1814         }
1815
1816         return true;
1817 }
1818
1819 /**
1820  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1821  * some constraints. This method checks one the following constraint (others
1822  * require compiler support):
1823  *
1824  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1825  */
1826 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1827 {
1828         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1829         unsigned int i;
1830         /* Iterate over all threads */
1831         for (i = 0; i < thrd_lists->size(); i++) {
1832                 const ModelAction *write_after_read = NULL;
1833
1834                 /* Iterate over actions in thread, starting from most recent */
1835                 action_list_t *list = &(*thrd_lists)[i];
1836                 action_list_t::reverse_iterator rit;
1837                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1838                         ModelAction *act = *rit;
1839
1840                         /* Don't disallow due to act == reader */
1841                         if (!reader->happens_before(act) || reader == act)
1842                                 break;
1843                         else if (act->is_write())
1844                                 write_after_read = act;
1845                         else if (act->is_read() && act->get_reads_from() != NULL)
1846                                 write_after_read = act->get_reads_from();
1847                 }
1848
1849                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1850                         return false;
1851         }
1852         return true;
1853 }
1854
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelExecution is certain that release_heads is complete;
 * false otherwise
 */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backward through the RMW chain that feeds rf, collecting any
	 * release heads along the way */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			/* A preceding fence-release in rf's thread also heads the
			 * sequence */
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for a store that could slot between
	 * 'release' and 'rf' in modification order, which would break the
	 * sequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* A thread that is finished, disabled, or whose last action is
		 * already ordered after rf cannot produce a breaking write later */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record what must be re-checked lazily once more
		 * modification-order information becomes available */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2022
2023 /**
2024  * An interface for getting the release sequence head(s) with which a
2025  * given ModelAction must synchronize. This function only returns a non-empty
2026  * result when it can locate a release sequence head with certainty. Otherwise,
2027  * it may mark the internal state of the ModelExecution so that it will handle
2028  * the release sequence at a later time, causing @a acquire to update its
2029  * synchronization at some later point in execution.
2030  *
2031  * @param acquire The 'acquire' action that may synchronize with a release
2032  * sequence
2033  * @param read The read action that may read from a release sequence; this may
2034  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2035  * when 'acquire' is a fence-acquire)
2036  * @param release_heads A pass-by-reference return parameter. Will be filled
2037  * with the head(s) of the release sequence(s), if they exists with certainty.
2038  * @see ModelExecution::release_seq_heads
2039  */
2040 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2041                 ModelAction *read, rel_heads_list_t *release_heads)
2042 {
2043         const ModelAction *rf = read->get_reads_from();
2044         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2045         sequence->acquire = acquire;
2046         sequence->read = read;
2047
2048         if (!release_seq_heads(rf, release_heads, sequence)) {
2049                 /* add act to 'lazy checking' list */
2050                 pending_rel_seqs.push_back(sequence);
2051         } else {
2052                 snapshot_free(sequence);
2053         }
2054 }
2055
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-evaluate this pending sequence with the current mo_graph */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with each newly-certain release head */
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		/* NOTE(review): 'updated' accumulates across loop iterations,
		 * so once any sequence adds synchronization, this block also
		 * runs for every remaining pending sequence — conservative
		 * (extra work-queue entries), presumably intentional; confirm */
		if (updated) {
			/* Re-check all pending release sequences */
			work_queue->push_back(CheckRelSeqWorkEntry(NULL));
			/* Re-check read-acquire for mo_graph edges */
			if (acquire->is_read())
				work_queue->push_back(MOEdgeWorkEntry(acquire));

			/* propagate synchronization to later actions; assumes
			 * 'acquire' is present in action_trace */
			action_list_t::reverse_iterator rit = action_trace.rbegin();
			for (; (*rit) != acquire; rit++) {
				ModelAction *propagate = *rit;
				if (acquire->happens_before(propagate)) {
					synchronize(acquire, propagate);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));
				}
			}
		}
		if (complete) {
			/* Resolved with certainty — drop the lazy record */
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2124
2125 /**
2126  * Performs various bookkeeping operations for the current ModelAction. For
2127  * instance, adds action to the per-object, per-thread action vector and to the
2128  * action trace list of all thread actions.
2129  *
2130  * @param act is the ModelAction to add.
2131  */
2132 void ModelExecution::add_action_to_lists(ModelAction *act)
2133 {
2134         int tid = id_to_int(act->get_tid());
2135         ModelAction *uninit = NULL;
2136         int uninit_id = -1;
2137         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2138         if (list->empty() && act->is_atomic_var()) {
2139                 uninit = get_uninitialized_action(act);
2140                 uninit_id = id_to_int(uninit->get_tid());
2141                 list->push_front(uninit);
2142         }
2143         list->push_back(act);
2144
2145         action_trace.push_back(act);
2146         if (uninit)
2147                 action_trace.push_front(uninit);
2148
2149         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2150         if (tid >= (int)vec->size())
2151                 vec->resize(priv->next_thread_id);
2152         (*vec)[tid].push_back(act);
2153         if (uninit)
2154                 (*vec)[uninit_id].push_front(uninit);
2155
2156         if ((int)thrd_last_action.size() <= tid)
2157                 thrd_last_action.resize(get_num_threads());
2158         thrd_last_action[tid] = act;
2159         if (uninit)
2160                 thrd_last_action[uninit_id] = uninit;
2161
2162         if (act->is_fence() && act->is_release()) {
2163                 if ((int)thrd_last_fence_release.size() <= tid)
2164                         thrd_last_fence_release.resize(get_num_threads());
2165                 thrd_last_fence_release[tid] = act;
2166         }
2167
2168         if (act->is_wait()) {
2169                 void *mutex_loc = (void *) act->get_value();
2170                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2171
2172                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2173                 if (tid >= (int)vec->size())
2174                         vec->resize(priv->next_thread_id);
2175                 (*vec)[tid].push_back(act);
2176         }
2177 }
2178
2179 /**
2180  * @brief Get the last action performed by a particular Thread
2181  * @param tid The thread ID of the Thread in question
2182  * @return The last action in the thread
2183  */
2184 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2185 {
2186         int threadid = id_to_int(tid);
2187         if (threadid < (int)thrd_last_action.size())
2188                 return thrd_last_action[id_to_int(tid)];
2189         else
2190                 return NULL;
2191 }
2192
2193 /**
2194  * @brief Get the last fence release performed by a particular Thread
2195  * @param tid The thread ID of the Thread in question
2196  * @return The last fence release in the thread, if one exists; NULL otherwise
2197  */
2198 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2199 {
2200         int threadid = id_to_int(tid);
2201         if (threadid < (int)thrd_last_fence_release.size())
2202                 return thrd_last_fence_release[id_to_int(tid)];
2203         else
2204                 return NULL;
2205 }
2206
2207 /**
2208  * Gets the last memory_order_seq_cst write (in the total global sequence)
2209  * performed on a particular object (i.e., memory location), not including the
2210  * current action.
2211  * @param curr The current ModelAction; also denotes the object location to
2212  * check
2213  * @return The last seq_cst write
2214  */
2215 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2216 {
2217         void *location = curr->get_location();
2218         action_list_t *list = obj_map.get(location);
2219         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2220         action_list_t::reverse_iterator rit;
2221         for (rit = list->rbegin(); (*rit) != curr; rit++)
2222                 ;
2223         rit++; /* Skip past curr */
2224         for ( ; rit != list->rend(); rit++)
2225                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2226                         return *rit;
2227         return NULL;
2228 }
2229
2230 /**
2231  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2232  * performed in a particular thread, prior to a particular fence.
2233  * @param tid The ID of the thread to check
2234  * @param before_fence The fence from which to begin the search; if NULL, then
2235  * search for the most recent fence in the thread.
2236  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2237  */
2238 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2239 {
2240         /* All fences should have location FENCE_LOCATION */
2241         action_list_t *list = obj_map.get(FENCE_LOCATION);
2242
2243         if (!list)
2244                 return NULL;
2245
2246         action_list_t::reverse_iterator rit = list->rbegin();
2247
2248         if (before_fence) {
2249                 for (; rit != list->rend(); rit++)
2250                         if (*rit == before_fence)
2251                                 break;
2252
2253                 ASSERT(*rit == before_fence);
2254                 rit++;
2255         }
2256
2257         for (; rit != list->rend(); rit++)
2258                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2259                         return *rit;
2260         return NULL;
2261 }
2262
2263 /**
2264  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2265  * location). This function identifies the mutex according to the current
2266  * action, which is presumed to perform on the same mutex.
2267  * @param curr The current ModelAction; also denotes the object location to
2268  * check
2269  * @return The last unlock operation
2270  */
2271 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2272 {
2273         void *location = curr->get_location();
2274         action_list_t *list = obj_map.get(location);
2275         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2276         action_list_t::reverse_iterator rit;
2277         for (rit = list->rbegin(); rit != list->rend(); rit++)
2278                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2279                         return *rit;
2280         return NULL;
2281 }
2282
2283 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2284 {
2285         ModelAction *parent = get_last_action(tid);
2286         if (!parent)
2287                 parent = get_thread(tid)->get_creation();
2288         return parent;
2289 }
2290
2291 /**
2292  * Returns the clock vector for a given thread.
2293  * @param tid The thread whose clock vector we want
2294  * @return Desired clock vector
2295  */
2296 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2297 {
2298         return get_parent_action(tid)->get_cv();
2299 }
2300
2301 /**
2302  * @brief Find the promise (if any) to resolve for the current action and
2303  * remove it from the pending promise vector
2304  * @param curr The current ModelAction. Should be a write.
2305  * @return The Promise to resolve, if any; otherwise NULL
2306  */
2307 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2308 {
2309         for (unsigned int i = 0; i < promises.size(); i++)
2310                 if (curr->get_node()->get_promise(i)) {
2311                         Promise *ret = promises[i];
2312                         promises.erase(promises.begin() + i);
2313                         return ret;
2314                 }
2315         return NULL;
2316 }
2317
2318 /**
2319  * Resolve a Promise with a current write.
2320  * @param write The ModelAction that is fulfilling Promises
2321  * @param promise The Promise to resolve
2322  * @return True if the Promise was successfully resolved; false otherwise
2323  */
2324 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2325 {
2326         ModelVector<ModelAction *> actions_to_check;
2327
2328         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2329                 ModelAction *read = promise->get_reader(i);
2330                 read_from(read, write);
2331                 actions_to_check.push_back(read);
2332         }
2333         /* Make sure the promise's value matches the write's value */
2334         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2335         if (!mo_graph->resolvePromise(promise, write))
2336                 priv->failed_promise = true;
2337
2338         /**
2339          * @todo  It is possible to end up in an inconsistent state, where a
2340          * "resolved" promise may still be referenced if
2341          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2342          *
2343          * Note that the inconsistency only matters when dumping mo_graph to
2344          * file.
2345          *
2346          * delete promise;
2347          */
2348
2349         //Check whether reading these writes has made threads unable to
2350         //resolve promises
2351         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2352                 ModelAction *read = actions_to_check[i];
2353                 mo_check_promises(read, true);
2354         }
2355
2356         return true;
2357 }
2358
2359 /**
2360  * Compute the set of promises that could potentially be satisfied by this
2361  * action. Note that the set computation actually appears in the Node, not in
2362  * ModelExecution.
2363  * @param curr The ModelAction that may satisfy promises
2364  */
2365 void ModelExecution::compute_promises(ModelAction *curr)
2366 {
2367         for (unsigned int i = 0; i < promises.size(); i++) {
2368                 Promise *promise = promises[i];
2369                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2370                         continue;
2371
2372                 bool satisfy = true;
2373                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2374                         const ModelAction *act = promise->get_reader(j);
2375                         if (act->happens_before(curr) ||
2376                                         act->could_synchronize_with(curr)) {
2377                                 satisfy = false;
2378                                 break;
2379                         }
2380                 }
2381                 if (satisfy)
2382                         curr->get_node()->set_promise(i);
2383         }
2384 }
2385
2386 /** Checks promises in response to change in ClockVector Threads. */
2387 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2388 {
2389         for (unsigned int i = 0; i < promises.size(); i++) {
2390                 Promise *promise = promises[i];
2391                 if (!promise->thread_is_available(tid))
2392                         continue;
2393                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2394                         const ModelAction *act = promise->get_reader(j);
2395                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2396                                         merge_cv->synchronized_since(act)) {
2397                                 if (promise->eliminate_thread(tid)) {
2398                                         /* Promise has failed */
2399                                         priv->failed_promise = true;
2400                                         return;
2401                                 }
2402                         }
2403                 }
2404         }
2405 }
2406
2407 void ModelExecution::check_promises_thread_disabled()
2408 {
2409         for (unsigned int i = 0; i < promises.size(); i++) {
2410                 Promise *promise = promises[i];
2411                 if (promise->has_failed()) {
2412                         priv->failed_promise = true;
2413                         return;
2414                 }
2415         }
2416 }
2417
2418 /**
2419  * @brief Checks promises in response to addition to modification order for
2420  * threads.
2421  *
2422  * We test whether threads are still available for satisfying promises after an
2423  * addition to our modification order constraints. Those that are unavailable
2424  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2425  * that promise has failed.
2426  *
2427  * @param act The ModelAction which updated the modification order
2428  * @param is_read_check Should be true if act is a read and we must check for
2429  * updates to the store from which it read (there is a distinction here for
2430  * RMW's, which are both a load and a store)
2431  */
2432 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2433 {
2434         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2435
2436         for (unsigned int i = 0; i < promises.size(); i++) {
2437                 Promise *promise = promises[i];
2438
2439                 // Is this promise on the same location?
2440                 if (!promise->same_location(write))
2441                         continue;
2442
2443                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2444                         const ModelAction *pread = promise->get_reader(j);
2445                         if (!pread->happens_before(act))
2446                                continue;
2447                         if (mo_graph->checkPromise(write, promise)) {
2448                                 priv->failed_promise = true;
2449                                 return;
2450                         }
2451                         break;
2452                 }
2453
2454                 // Don't do any lookups twice for the same thread
2455                 if (!promise->thread_is_available(act->get_tid()))
2456                         continue;
2457
2458                 if (mo_graph->checkReachable(promise, write)) {
2459                         if (mo_graph->checkPromise(write, promise)) {
2460                                 priv->failed_promise = true;
2461                                 return;
2462                         }
2463                 }
2464         }
2465 }
2466
2467 /**
2468  * Compute the set of writes that may break the current pending release
2469  * sequence. This information is extracted from previou release sequence
2470  * calculations.
2471  *
2472  * @param curr The current ModelAction. Must be a release sequence fixup
2473  * action.
2474  */
2475 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2476 {
2477         if (pending_rel_seqs.empty())
2478                 return;
2479
2480         struct release_seq *pending = pending_rel_seqs.back();
2481         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2482                 const ModelAction *write = pending->writes[i];
2483                 curr->get_node()->add_relseq_break(write);
2484         }
2485
2486         /* NULL means don't break the sequence; just synchronize */
2487         curr->get_node()->add_relseq_break(NULL);
2488 }
2489
2490 /**
2491  * Build up an initial set of all past writes that this 'read' action may read
2492  * from, as well as any previously-observed future values that must still be valid.
2493  *
2494  * @param curr is the current ModelAction that we are exploring; it must be a
2495  * 'read' operation.
2496  */
2497 void ModelExecution::build_may_read_from(ModelAction *curr)
2498 {
2499         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2500         unsigned int i;
2501         ASSERT(curr->is_read());
2502
2503         ModelAction *last_sc_write = NULL;
2504
2505         if (curr->is_seqcst())
2506                 last_sc_write = get_last_seq_cst_write(curr);
2507
2508         /* Iterate over all threads */
2509         for (i = 0; i < thrd_lists->size(); i++) {
2510                 /* Iterate over actions in thread, starting from most recent */
2511                 action_list_t *list = &(*thrd_lists)[i];
2512                 action_list_t::reverse_iterator rit;
2513                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2514                         ModelAction *act = *rit;
2515
2516                         /* Only consider 'write' actions */
2517                         if (!act->is_write() || act == curr)
2518                                 continue;
2519
2520                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2521                         bool allow_read = true;
2522
2523                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2524                                 allow_read = false;
2525                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2526                                 allow_read = false;
2527
2528                         if (allow_read) {
2529                                 /* Only add feasible reads */
2530                                 mo_graph->startChanges();
2531                                 r_modification_order(curr, act);
2532                                 if (!is_infeasible())
2533                                         curr->get_node()->add_read_from_past(act);
2534                                 mo_graph->rollbackChanges();
2535                         }
2536
2537                         /* Include at most one act per-thread that "happens before" curr */
2538                         if (act->happens_before(curr))
2539                                 break;
2540                 }
2541         }
2542
2543         /* Inherit existing, promised future values */
2544         for (i = 0; i < promises.size(); i++) {
2545                 const Promise *promise = promises[i];
2546                 const ModelAction *promise_read = promise->get_reader(0);
2547                 if (promise_read->same_var(curr)) {
2548                         /* Only add feasible future-values */
2549                         mo_graph->startChanges();
2550                         r_modification_order(curr, promise);
2551                         if (!is_infeasible())
2552                                 curr->get_node()->add_read_from_promise(promise_read);
2553                         mo_graph->rollbackChanges();
2554                 }
2555         }
2556
2557         /* We may find no valid may-read-from only if the execution is doomed */
2558         if (!curr->get_node()->read_from_size()) {
2559                 priv->no_valid_reads = true;
2560                 set_assert();
2561         }
2562
2563         if (DBG_ENABLED()) {
2564                 model_print("Reached read action:\n");
2565                 curr->print();
2566                 model_print("Printing read_from_past\n");
2567                 curr->get_node()->print_read_from_past();
2568                 model_print("End printing read_from_past\n");
2569         }
2570 }
2571
2572 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2573 {
2574         for ( ; write != NULL; write = write->get_reads_from()) {
2575                 /* UNINIT actions don't have a Node, and they never sleep */
2576                 if (write->is_uninitialized())
2577                         return true;
2578                 Node *prevnode = write->get_node()->get_parent();
2579
2580                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2581                 if (write->is_release() && thread_sleep)
2582                         return true;
2583                 if (!write->is_rmw())
2584                         return false;
2585         }
2586         return true;
2587 }
2588
2589 /**
2590  * @brief Get an action representing an uninitialized atomic
2591  *
2592  * This function may create a new one or try to retrieve one from the NodeStack
2593  *
2594  * @param curr The current action, which prompts the creation of an UNINIT action
2595  * @return A pointer to the UNINIT ModelAction
2596  */
2597 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2598 {
2599         Node *node = curr->get_node();
2600         ModelAction *act = node->get_uninit_action();
2601         if (!act) {
2602                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2603                 node->set_uninit_action(act);
2604         }
2605         act->create_cv(NULL);
2606         return act;
2607 }
2608
2609 static void print_list(const action_list_t *list)
2610 {
2611         action_list_t::const_iterator it;
2612
2613         model_print("---------------------------------------------------------------------\n");
2614
2615         unsigned int hash = 0;
2616
2617         for (it = list->begin(); it != list->end(); it++) {
2618                 const ModelAction *act = *it;
2619                 if (act->get_seq_number() > 0)
2620                         act->print();
2621                 hash = hash^(hash<<3)^((*it)->hash());
2622         }
2623         model_print("HASH %u\n", hash);
2624         model_print("---------------------------------------------------------------------\n");
2625 }
2626
2627 #if SUPPORT_MOD_ORDER_DUMP
2628 void ModelExecution::dumpGraph(char *filename) const
2629 {
2630         char buffer[200];
2631         sprintf(buffer, "%s.dot", filename);
2632         FILE *file = fopen(buffer, "w");
2633         fprintf(file, "digraph %s {\n", filename);
2634         mo_graph->dumpNodes(file);
2635         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2636
2637         for (action_list_t::iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2638                 ModelAction *act = *it;
2639                 if (act->is_read()) {
2640                         mo_graph->dot_print_node(file, act);
2641                         if (act->get_reads_from())
2642                                 mo_graph->dot_print_edge(file,
2643                                                 act->get_reads_from(),
2644                                                 act,
2645                                                 "label=\"rf\", color=red, weight=2");
2646                         else
2647                                 mo_graph->dot_print_edge(file,
2648                                                 act->get_reads_from_promise(),
2649                                                 act,
2650                                                 "label=\"rf\", color=red");
2651                 }
2652                 if (thread_array[act->get_tid()]) {
2653                         mo_graph->dot_print_edge(file,
2654                                         thread_array[id_to_int(act->get_tid())],
2655                                         act,
2656                                         "label=\"sb\", color=blue, weight=400");
2657                 }
2658
2659                 thread_array[act->get_tid()] = act;
2660         }
2661         fprintf(file, "}\n");
2662         model_free(thread_array);
2663         fclose(file);
2664 }
2665 #endif
2666
2667 /** @brief Prints an execution trace summary. */
2668 void ModelExecution::print_summary() const
2669 {
2670 #if SUPPORT_MOD_ORDER_DUMP
2671         char buffername[100];
2672         sprintf(buffername, "exec%04u", get_execution_number());
2673         mo_graph->dumpGraphToFile(buffername);
2674         sprintf(buffername, "graph%04u", get_execution_number());
2675         dumpGraph(buffername);
2676 #endif
2677
2678         model_print("Execution %d:", get_execution_number());
2679         if (isfeasibleprefix()) {
2680                 if (is_yieldblocked())
2681                         model_print(" YIELD BLOCKED");
2682                 if (scheduler->all_threads_sleeping())
2683                         model_print(" SLEEP-SET REDUNDANT");
2684                 model_print("\n");
2685         } else
2686                 print_infeasibility(" INFEASIBLE");
2687         print_list(&action_trace);
2688         model_print("\n");
2689         if (!promises.empty()) {
2690                 model_print("Pending promises:\n");
2691                 for (unsigned int i = 0; i < promises.size(); i++) {
2692                         model_print(" [P%u] ", i);
2693                         promises[i]->print();
2694                 }
2695                 model_print("\n");
2696         }
2697 }
2698
2699 /**
2700  * Add a Thread to the system for the first time. Should only be called once
2701  * per thread.
2702  * @param t The Thread to add
2703  */
2704 void ModelExecution::add_thread(Thread *t)
2705 {
2706         unsigned int i = id_to_int(t->get_id());
2707         if (i >= thread_map.size())
2708                 thread_map.resize(i + 1);
2709         thread_map[i] = t;
2710         if (!t->is_model_thread())
2711                 scheduler->add_thread(t);
2712 }
2713
2714 /**
2715  * @brief Get a Thread reference by its ID
2716  * @param tid The Thread's ID
2717  * @return A Thread reference
2718  */
2719 Thread * ModelExecution::get_thread(thread_id_t tid) const
2720 {
2721         unsigned int i = id_to_int(tid);
2722         if (i < thread_map.size())
2723                 return thread_map[i];
2724         return NULL;
2725 }
2726
2727 /**
2728  * @brief Get a reference to the Thread in which a ModelAction was executed
2729  * @param act The ModelAction
2730  * @return A Thread reference
2731  */
2732 Thread * ModelExecution::get_thread(const ModelAction *act) const
2733 {
2734         return get_thread(act->get_tid());
2735 }
2736
2737 /**
2738  * @brief Get a Promise's "promise number"
2739  *
2740  * A "promise number" is an index number that is unique to a promise, valid
2741  * only for a specific snapshot of an execution trace. Promises may come and go
2742  * as they are generated an resolved, so an index only retains meaning for the
2743  * current snapshot.
2744  *
2745  * @param promise The Promise to check
2746  * @return The promise index, if the promise still is valid; otherwise -1
2747  */
2748 int ModelExecution::get_promise_number(const Promise *promise) const
2749 {
2750         for (unsigned int i = 0; i < promises.size(); i++)
2751                 if (promises[i] == promise)
2752                         return i;
2753         /* Not found */
2754         return -1;
2755 }
2756
2757 /**
2758  * @brief Check if a Thread is currently enabled
2759  * @param t The Thread to check
2760  * @return True if the Thread is currently enabled
2761  */
2762 bool ModelExecution::is_enabled(Thread *t) const
2763 {
2764         return scheduler->is_enabled(t);
2765 }
2766
2767 /**
2768  * @brief Check if a Thread is currently enabled
2769  * @param tid The ID of the Thread to check
2770  * @return True if the Thread is currently enabled
2771  */
2772 bool ModelExecution::is_enabled(thread_id_t tid) const
2773 {
2774         return scheduler->is_enabled(tid);
2775 }
2776
2777 /**
2778  * @brief Select the next thread to execute based on the curren action
2779  *
2780  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2781  * actions should be followed by the execution of their child thread. In either
2782  * case, the current action should determine the next thread schedule.
2783  *
2784  * @param curr The current action
2785  * @return The next thread to run, if the current action will determine this
2786  * selection; otherwise NULL
2787  */
2788 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2789 {
2790         /* Do not split atomic RMW */
2791         if (curr->is_rmwr())
2792                 return get_thread(curr);
2793         /* Follow CREATE with the created thread */
2794         if (curr->get_type() == THREAD_CREATE)
2795                 return curr->get_thread_operand();
2796         return NULL;
2797 }
2798
2799 /** @return True if the execution has taken too many steps */
2800 bool ModelExecution::too_many_steps() const
2801 {
2802         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2803 }
2804
2805 /**
2806  * Takes the next step in the execution, if possible.
2807  * @param curr The current step to take
2808  * @return Returns the next Thread to run, if any; NULL if this execution
2809  * should terminate
2810  */
2811 Thread * ModelExecution::take_step(ModelAction *curr)
2812 {
2813         Thread *curr_thrd = get_thread(curr);
2814         ASSERT(curr_thrd->get_state() == THREAD_READY);
2815
2816         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2817         curr = check_current_action(curr);
2818         ASSERT(curr);
2819
2820         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2821                 scheduler->remove_thread(curr_thrd);
2822
2823         return action_select_next_thread(curr);
2824 }
2825
2826 /**
2827  * Launch end-of-execution release sequence fixups only when
2828  * the execution is otherwise feasible AND there are:
2829  *
2830  * (1) pending release sequences
2831  * (2) pending assertions that could be invalidated by a change
2832  * in clock vectors (i.e., data races)
2833  * (3) no pending promises
2834  */
2835 void ModelExecution::fixup_release_sequences()
2836 {
2837         while (!pending_rel_seqs.empty() &&
2838                         is_feasible_prefix_ignore_relseq() &&
2839                         haveUnrealizedRaces()) {
2840                 model_print("*** WARNING: release sequence fixup action "
2841                                 "(%zu pending release seuqence(s)) ***\n",
2842                                 pending_rel_seqs.size());
2843                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2844                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2845                                 model_thread);
2846                 take_step(fixup);
2847         };
2848 }