1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 too_many_reads(false),
34                 no_valid_reads(false),
35                 bad_synchronization(false),
36                 asserted(false)
37         { }
38
39         ~model_snapshot_members() {
40                 for (unsigned int i = 0; i < bugs.size(); i++)
41                         delete bugs[i];
42                 bugs.clear();
43         }
44
45         unsigned int next_thread_id;
46         modelclock_t used_sequence_numbers;
47         ModelAction *next_backtrack;
48         SnapVector<bug_message *> bugs;
49         bool failed_promise;
50         bool too_many_reads;
51         bool no_valid_reads;
52         /** @brief Incorrectly-ordered synchronization was made */
53         bool bad_synchronization;
54         bool asserted;
55
56         SNAPSHOTALLOC
57 };
58
59 /** @brief Constructor */
60 ModelExecution::ModelExecution(ModelChecker *m,
61                 const struct model_params *params,
62                 Scheduler *scheduler,
63                 NodeStack *node_stack) :
64         model(m),
65         params(params),
66         scheduler(scheduler),
67         action_trace(),
68         thread_map(2), /* We'll always need at least 2 threads */
69         obj_map(),
70         condvar_waiters_map(),
71         obj_thrd_map(),
72         promises(),
73         futurevalues(),
74         pending_rel_seqs(),
75         thrd_last_action(1),
76         thrd_last_fence_release(),
77         node_stack(node_stack),
78         priv(new struct model_snapshot_members()),
79         mo_graph(new CycleGraph())
80 {
81         /* Initialize a model-checker thread, for special ModelActions */
82         model_thread = new Thread(get_next_id());
83         add_thread(model_thread);
84         scheduler->register_engine(this);
85         node_stack->register_engine(this);
86 }
87
88 /** @brief Destructor */
89 ModelExecution::~ModelExecution()
90 {
91         for (unsigned int i = 0; i < get_num_threads(); i++)
92                 delete get_thread(int_to_id(i));
93
94         for (unsigned int i = 0; i < promises.size(); i++)
95                 delete promises[i];
96
97         delete mo_graph;
98         delete priv;
99 }
100
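/** @return The number of the current execution (forwarded from the owning ModelChecker) */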
101 int ModelExecution::get_execution_number() const
102 {
103         return model->get_execution_number();
104 }
105
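/**
 * @brief Get the action list recorded for a memory location, creating an
 * empty list on first access so callers never receive NULL
 */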
106 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
107 {
108         action_list_t *tmp = hash->get(ptr);
109         if (tmp == NULL) {
110                 tmp = new action_list_t();
111                 hash->put(ptr, tmp);
112         }
113         return tmp;
114 }
115
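/**
 * @brief Get the per-thread action-list vector recorded for a memory
 * location, creating an empty vector on first access
 */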
116 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
117 {
118         SnapVector<action_list_t> *tmp = hash->get(ptr);
119         if (tmp == NULL) {
120                 tmp = new SnapVector<action_list_t>();
121                 hash->put(ptr, tmp);
122         }
123         return tmp;
124 }
125
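/**
 * @return The list of actions performed by thread @a tid on object @a obj,
 * or NULL if no such list has been recorded
 */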
126 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
127 {
128         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
129         if (wrv==NULL)
130                 return NULL;
131         unsigned int thread=id_to_int(tid);
132         if (thread < wrv->size())
133                 return &(*wrv)[thread];
134         else
135                 return NULL;
136 }
137
138 /** @return a thread ID for a new Thread */
139 thread_id_t ModelExecution::get_next_id()
140 {
141         return priv->next_thread_id++;
142 }
143
144 /** @return the number of user threads created during this execution */
145 unsigned int ModelExecution::get_num_threads() const
146 {
147         return priv->next_thread_id;
148 }
149
150 /** @return a sequence number for a new ModelAction */
151 modelclock_t ModelExecution::get_next_seq_num()
152 {
153         return ++priv->used_sequence_numbers;
154 }
155
156 /**
157  * @brief Should the current action wake up a given thread?
158  *
159  * @param curr The current action
160  * @param thread The thread that we might wake up
161  * @return True, if we should wake up the sleeping thread; false otherwise
162  */
163 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
164 {
165         const ModelAction *asleep = thread->get_pending();
166         /* Don't allow partial RMW to wake anyone up */
167         if (curr->is_rmwr())
168                 return false;
169         /* Synchronizing actions may have been backtracked */
170         if (asleep->could_synchronize_with(curr))
171                 return true;
172         /* A sleeping fence-acquire may be woken by any release (fence-release or store-release) */
173         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
174                 return true;
175         /* Fence-release + store can awake load-acquire on the same location */
176         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
177                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
178                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
179                         return true;
180         }
181         return false;
182 }
183
184 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
185 {
186         for (unsigned int i = 0; i < get_num_threads(); i++) {
187                 Thread *thr = get_thread(int_to_id(i));
188                 if (scheduler->is_sleep_set(thr)) {
189                         if (should_wake_up(curr, thr))
190                                 /* Remove this thread from sleep set */
191                                 scheduler->remove_sleep(thr);
192                 }
193         }
194 }
195
196 /** @brief Alert the model-checker that an incorrectly-ordered
197  * synchronization was made */
198 void ModelExecution::set_bad_synchronization()
199 {
200         priv->bad_synchronization = true;
201 }
202
203 bool ModelExecution::assert_bug(const char *msg)
204 {
205         priv->bugs.push_back(new bug_message(msg));
206
207         if (isfeasibleprefix()) {
208                 set_assert();
209                 return true;
210         }
211         return false;
212 }
213
214 /** @return True, if any bugs have been reported for this execution */
215 bool ModelExecution::have_bug_reports() const
216 {
217         return priv->bugs.size() != 0;
218 }
219
220 SnapVector<bug_message *> * ModelExecution::get_bugs() const
221 {
222         return &priv->bugs;
223 }
224
225 /**
226  * Check whether the current trace has triggered an assertion which should halt
227  * its execution.
228  *
229  * @return True, if the execution should be aborted; false otherwise
230  */
231 bool ModelExecution::has_asserted() const
232 {
233         return priv->asserted;
234 }
235
236 /**
237  * Trigger a trace assertion which should cause this execution to be halted.
238  * This can be due to a detected bug or due to an infeasibility that should
239  * halt ASAP.
240  */
241 void ModelExecution::set_assert()
242 {
243         priv->asserted = true;
244 }
245
246 /**
247  * Check if we are in a deadlock. Should only be called at the end of an
248  * execution, although it should not give false positives in the middle of an
249  * execution (there should be some ENABLED thread).
250  *
251  * @return True if program is in a deadlock; false otherwise
252  */
253 bool ModelExecution::is_deadlocked() const
254 {
255         bool blocking_threads = false;
256         for (unsigned int i = 0; i < get_num_threads(); i++) {
257                 thread_id_t tid = int_to_id(i);
258                 if (is_enabled(tid))
259                         return false;
260                 Thread *t = get_thread(tid);
261                 if (!t->is_model_thread() && t->get_pending())
262                         blocking_threads = true;
263         }
264         return blocking_threads;
265 }
266
267 /**
268  * @brief Check if we are yield-blocked
269  *
270  * A program can be "yield-blocked" if all threads are ready to execute a
271  * yield.
272  *
273  * @return True if the program is yield-blocked; false otherwise
274  */
275 bool ModelExecution::is_yieldblocked() const
276 {
277         if (!params->yieldblock)
278                 return false;
279
280         for (unsigned int i = 0; i < get_num_threads(); i++) {
281                 thread_id_t tid = int_to_id(i);
282                 Thread *t = get_thread(tid);
283                 if (t->get_pending() && t->get_pending()->is_yield())
284                         return true;
285         }
286         return false;
287 }
288
289 /**
290  * Check if this is a complete execution. That is, have all threads completed
291  * execution (rather than exiting because sleep sets have forced a redundant
292  * execution).
293  *
294  * @return True if the execution is complete.
295  */
296 bool ModelExecution::is_complete_execution() const
297 {
298         if (is_yieldblocked())
299                 return false;
300         for (unsigned int i = 0; i < get_num_threads(); i++)
301                 if (is_enabled(int_to_id(i)))
302                         return false;
303         return true;
304 }
305
306 /**
307  * @brief Find the last fence-related backtracking conflict for a ModelAction
308  *
309  * This function performs the search for the most recent conflicting action
310  * against which we should perform backtracking, as affected by fence
311  * operations. This includes pairs of potentially-synchronizing actions which
312  * occur due to fence-acquire or fence-release, and hence should be explored in
313  * the opposite execution order.
314  *
315  * @param act The current action
316  * @return The most recent action which conflicts with act due to fences
317  */
318 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
319 {
320         /* Only perform release/acquire fence backtracking for stores */
321         if (!act->is_write())
322                 return NULL;
323
324         /* Find a fence-release (or, act is a release) */
325         ModelAction *last_release;
326         if (act->is_release())
327                 last_release = act;
328         else
329                 last_release = get_last_fence_release(act->get_tid());
330         if (!last_release)
331                 return NULL;
332
333         /* Skip past the release */
334         const action_list_t *list = &action_trace;
335         action_list_t::const_reverse_iterator rit;
336         for (rit = list->rbegin(); rit != list->rend(); rit++)
337                 if (*rit == last_release)
338                         break;
339         ASSERT(rit != list->rend());
340
341         /* Find a prior:
342          *   load-acquire
343          * or
344          *   load --sb-> fence-acquire */
345         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
346         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
347         bool found_acquire_fences = false;
348         for ( ; rit != list->rend(); rit++) {
349                 ModelAction *prev = *rit;
350                 if (act->same_thread(prev))
351                         continue;
352
353                 int tid = id_to_int(prev->get_tid());
354
355                 if (prev->is_read() && act->same_var(prev)) {
356                         if (prev->is_acquire()) {
357                                 /* Found most recent load-acquire, don't need
358                                  * to search for more fences */
359                                 if (!found_acquire_fences)
360                                         return NULL;
361                         } else {
362                                 prior_loads[tid] = prev;
363                         }
364                 }
365                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
366                         found_acquire_fences = true;
367                         acquire_fences[tid] = prev;
368                 }
369         }
370
371         ModelAction *latest_backtrack = NULL;
372         for (unsigned int i = 0; i < acquire_fences.size(); i++)
373                 if (acquire_fences[i] && prior_loads[i])
374                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
375                                 latest_backtrack = acquire_fences[i];
376         return latest_backtrack;
377 }
378
379 /**
380  * @brief Find the last backtracking conflict for a ModelAction
381  *
382  * This function performs the search for the most recent conflicting action
383  * against which we should perform backtracking. This primarily includes pairs of
384  * synchronizing actions which should be explored in the opposite execution
385  * order.
386  *
387  * @param act The current action
388  * @return The most recent action which conflicts with act
389  */
390 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
391 {
392         switch (act->get_type()) {
393         case ATOMIC_FENCE:
394                 /* Only seq-cst fences can (directly) cause backtracking */
395                 if (!act->is_seqcst())
396                         break;
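		/* A seq-cst fence falls through to the ATOMIC_READ/WRITE/RMW conflict search below */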
397         case ATOMIC_READ:
398         case ATOMIC_WRITE:
399         case ATOMIC_RMW: {
400                 ModelAction *ret = NULL;
401
402                 /* linear search: from most recent to oldest */
403                 action_list_t *list = obj_map.get(act->get_location());
404                 action_list_t::reverse_iterator rit;
405                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
406                         ModelAction *prev = *rit;
407                         if (prev == act)
408                                 continue;
409                         if (prev->could_synchronize_with(act)) {
410                                 ret = prev;
411                                 break;
412                         }
413                 }
414
415                 ModelAction *ret2 = get_last_fence_conflict(act);
416                 if (!ret2)
417                         return ret;
418                 if (!ret)
419                         return ret2;
420                 if (*ret < *ret2)
421                         return ret2;
422                 return ret;
423         }
424         case ATOMIC_LOCK:
425         case ATOMIC_TRYLOCK: {
426                 /* linear search: from most recent to oldest */
427                 action_list_t *list = obj_map.get(act->get_location());
428                 action_list_t::reverse_iterator rit;
429                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
430                         ModelAction *prev = *rit;
431                         if (act->is_conflicting_lock(prev))
432                                 return prev;
433                 }
434                 break;
435         }
436         case ATOMIC_UNLOCK: {
437                 /* linear search: from most recent to oldest */
438                 action_list_t *list = obj_map.get(act->get_location());
439                 action_list_t::reverse_iterator rit;
440                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
441                         ModelAction *prev = *rit;
442                         if (!act->same_thread(prev) && prev->is_failed_trylock())
443                                 return prev;
444                 }
445                 break;
446         }
447         case ATOMIC_WAIT: {
448                 /* linear search: from most recent to oldest */
449                 action_list_t *list = obj_map.get(act->get_location());
450                 action_list_t::reverse_iterator rit;
451                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
452                         ModelAction *prev = *rit;
453                         if (!act->same_thread(prev) && prev->is_failed_trylock())
454                                 return prev;
455                         if (!act->same_thread(prev) && prev->is_notify())
456                                 return prev;
457                 }
458                 break;
459         }
460
461         case ATOMIC_NOTIFY_ALL:
462         case ATOMIC_NOTIFY_ONE: {
463                 /* linear search: from most recent to oldest */
464                 action_list_t *list = obj_map.get(act->get_location());
465                 action_list_t::reverse_iterator rit;
466                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
467                         ModelAction *prev = *rit;
468                         if (!act->same_thread(prev) && prev->is_wait())
469                                 return prev;
470                 }
471                 break;
472         }
473         default:
474                 break;
475         }
476         return NULL;
477 }
478
479 /** This method finds backtracking points against which we should try to
480  * reorder the parameter ModelAction.
481  *
482  * @param act The ModelAction to find backtracking points for.
483  */
484 void ModelExecution::set_backtracking(ModelAction *act)
485 {
486         Thread *t = get_thread(act);
487         ModelAction *prev = get_last_conflict(act);
488         if (prev == NULL)
489                 return;
490
491         Node *node = prev->get_node()->get_parent();
492
493         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
494         int low_tid, high_tid;
495         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
496                 low_tid = id_to_int(act->get_tid());
497                 high_tid = low_tid + 1;
498         } else {
499                 low_tid = 0;
500                 high_tid = get_num_threads();
501         }
502
503         for (int i = low_tid; i < high_tid; i++) {
504                 thread_id_t tid = int_to_id(i);
505
506                 /* Make sure this thread can be enabled here. */
507                 if (i >= node->get_num_threads())
508                         break;
509
510                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
511                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
512                 if (node->enabled_status(tid) != THREAD_ENABLED)
513                         continue;
514
515                 /* Check if this has been explored already */
516                 if (node->has_been_explored(tid))
517                         continue;
518
519                 /* See if fairness allows */
520                 if (params->fairwindow != 0 && !node->has_priority(tid)) {
521                         bool unfair = false;
522                         for (int t = 0; t < node->get_num_threads(); t++) {
523                                 thread_id_t tother = int_to_id(t);
524                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
525                                         unfair = true;
526                                         break;
527                                 }
528                         }
529                         if (unfair)
530                                 continue;
531                 }
532
533                 /* See if CHESS-like yield fairness allows */
534                 if (params->yieldon) {
535                         bool unfair = false;
536                         for (int t = 0; t < node->get_num_threads(); t++) {
537                                 thread_id_t tother = int_to_id(t);
538                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
539                                         unfair = true;
540                                         break;
541                                 }
542                         }
543                         if (unfair)
544                                 continue;
545                 }
546
547                 /* Cache the latest backtracking point */
548                 set_latest_backtrack(prev);
549
550                 /* If this is a new backtracking point, mark the tree */
551                 if (!node->set_backtrack(tid))
552                         continue;
553                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
554                                         id_to_int(prev->get_tid()),
555                                         id_to_int(t->get_id()));
556                 if (DBG_ENABLED()) {
557                         prev->print();
558                         act->print();
559                 }
560         }
561 }
562
563 /**
564  * @brief Cache a backtracking point as the "most recent", if eligible
565  *
566  * Note that this does not prepare the NodeStack for this backtracking
567  * operation; it only caches the action on a per-execution basis.
568  *
569  * @param act The operation at which we should explore a different next action
570  * (i.e., backtracking point)
571  * @return True, if this action is now the most recent backtracking point;
572  * false otherwise
573  */
574 bool ModelExecution::set_latest_backtrack(ModelAction *act)
575 {
576         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
577                 priv->next_backtrack = act;
578                 return true;
579         }
580         return false;
581 }
582
583 /**
584  * Returns the last backtracking point. The model checker will explore a different
585  * path for this point in the next execution.
586  * @return The ModelAction at which the next execution should diverge.
587  */
588 ModelAction * ModelExecution::get_next_backtrack()
589 {
590         ModelAction *next = priv->next_backtrack;
591         priv->next_backtrack = NULL;
592         return next;
593 }
594
595 /**
596  * Processes a read model action.
597  * @param curr is the read model action to process.
598  * @return True if processing this read updates the mo_graph.
599  */
600 bool ModelExecution::process_read(ModelAction *curr)
601 {
602         Node *node = curr->get_node();
603         while (true) {
604                 bool updated = false;
605                 switch (node->get_read_from_status()) {
606                 case READ_FROM_PAST: {
607                         const ModelAction *rf = node->get_read_from_past();
608                         ASSERT(rf);
609
610                         mo_graph->startChanges();
611
612                         ASSERT(!is_infeasible());
613                         if (!check_recency(curr, rf)) {
614                                 if (node->increment_read_from()) {
615                                         mo_graph->rollbackChanges();
616                                         continue;
617                                 } else {
618                                         priv->too_many_reads = true;
619                                 }
620                         }
621
622                         updated = r_modification_order(curr, rf);
623                         read_from(curr, rf);
624                         mo_graph->commitChanges();
625                         mo_check_promises(curr, true);
626                         break;
627                 }
628                 case READ_FROM_PROMISE: {
629                         Promise *promise = curr->get_node()->get_read_from_promise();
630                         if (promise->add_reader(curr))
631                                 priv->failed_promise = true;
632                         curr->set_read_from_promise(promise);
633                         mo_graph->startChanges();
634                         if (!check_recency(curr, promise))
635                                 priv->too_many_reads = true;
636                         updated = r_modification_order(curr, promise);
637                         mo_graph->commitChanges();
638                         break;
639                 }
640                 case READ_FROM_FUTURE: {
641                         /* Read from future value */
642                         struct future_value fv = node->get_future_value();
643                         Promise *promise = new Promise(this, curr, fv);
644                         curr->set_read_from_promise(promise);
645                         promises.push_back(promise);
646                         mo_graph->startChanges();
647                         updated = r_modification_order(curr, promise);
648                         mo_graph->commitChanges();
649                         break;
650                 }
651                 default:
652                         ASSERT(false);
653                 }
654                 get_thread(curr)->set_return_value(curr->get_return_value());
655                 return updated;
656         }
657 }
658
659 /**
660  * Processes a lock, trylock, unlock, wait, or notify model action.
661  * @param curr is the mutex operation to process.
662  *
663  * The trylock operation checks whether the lock is taken. If not, it
664  * falls through to the normal lock operation case. If so, it returns
665  * failure.
666  *
667  * The lock operation has already been checked to be enabled, so
668  * it just grabs the lock and synchronizes with the previous unlock.
669  *
670  * The unlock operation has to re-enable all of the threads that are
671  * waiting on the lock.
672  *
673  * @return True if synchronization was updated; false otherwise
674  */
675 bool ModelExecution::process_mutex(ModelAction *curr)
676 {
677         std::mutex *mutex = curr->get_mutex();
678         struct std::mutex_state *state = NULL;
679
680         if (mutex)
681                 state = mutex->get_state();
682
683         switch (curr->get_type()) {
684         case ATOMIC_TRYLOCK: {
685                 bool success = !state->locked;
686                 curr->set_try_lock(success);
687                 if (!success) {
688                         get_thread(curr)->set_return_value(0);
689                         break;
690                 }
691                 get_thread(curr)->set_return_value(1);
692         }
693                 //otherwise fall into the lock case
694         case ATOMIC_LOCK: {
695                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
696                         assert_bug("Lock access before initialization");
697                 state->locked = get_thread(curr);
698                 ModelAction *unlock = get_last_unlock(curr);
699                 //synchronize with the previous unlock statement
700                 if (unlock != NULL) {
701                         synchronize(unlock, curr);
702                         return true;
703                 }
704                 break;
705         }
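	/* A wait must release the mutex before blocking, so ATOMIC_WAIT shares
	 * the unlock processing below */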
706         case ATOMIC_WAIT:
707         case ATOMIC_UNLOCK: {
708                 /* wake up the other threads */
709                 for (unsigned int i = 0; i < get_num_threads(); i++) {
710                         Thread *t = get_thread(int_to_id(i));
711                         Thread *curr_thrd = get_thread(curr);
712                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
713                                 scheduler->wake(t);
714                 }
715
716                 /* unlock the lock - after checking who was waiting on it */
717                 state->locked = NULL;
718
719                 if (!curr->is_wait())
720                         break; /* The rest is only for ATOMIC_WAIT */
721
722                 /* Should we go to sleep? (simulate spurious failures) */
723                 if (curr->get_node()->get_misc() == 0) {
724                         get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
725                         /* disable us */
726                         scheduler->sleep(get_thread(curr));
727                 }
728                 break;
729         }
730         case ATOMIC_NOTIFY_ALL: {
731                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
732                 //activate all the waiting threads
733                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
734                         scheduler->wake(get_thread(*rit));
735                 }
736                 waiters->clear();
737                 break;
738         }
739         case ATOMIC_NOTIFY_ONE: {
740                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
741                 int wakeupthread = curr->get_node()->get_misc();
742                 action_list_t::iterator it = waiters->begin();
743                 advance(it, wakeupthread);
744                 scheduler->wake(get_thread(*it));
745                 waiters->erase(it);
746                 break;
747         }
748
749         default:
750                 ASSERT(0);
751         }
752         return false;
753 }
754
755 /**
756  * @brief Check if the current pending promises allow a future value to be sent
757  *
758  * If one of the following is true:
759  *  (a) there are no pending promises
760  *  (b) the reader and writer do not cross any promises
761  * Then, it is safe to pass a future value back now.
762  *
763  * Otherwise, we must save the pending future value until (a) or (b) is true
764  *
765  * @param writer The operation which sends the future value. Must be a write.
766  * @param reader The operation which will observe the value. Must be a read.
767  * @return True if the future value can be sent now; false if it must wait.
768  */
769 bool ModelExecution::promises_may_allow(const ModelAction *writer,
770                 const ModelAction *reader) const
771 {
772         if (promises.empty())
773                 return true;
774         for (int i = promises.size() - 1; i >= 0; i--) {
775                 ModelAction *pr = promises[i]->get_reader(0);
776                 //reader is after promise...doesn't cross any promise
777                 if (*reader > *pr)
778                         return true;
779                 //writer is after promise, reader before...bad...
780                 if (*writer > *pr)
781                         return false;
782         }
783         return true;
784 }
785
786 /**
787  * @brief Add a future value to a reader
788  *
789  * This function performs a few additional checks to ensure that the future
790  * value can be feasibly observed by the reader
791  *
792  * @param writer The operation whose value is sent. Must be a write.
793  * @param reader The read operation which may read the future value. Must be a read.
794  */
795 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
796 {
797         /* Do more ambitious checks now that mo is more complete */
798         if (!mo_may_allow(writer, reader))
799                 return;
800
801         Node *node = reader->get_node();
802
803         /* Find an ancestor thread which exists at the time of the reader */
804         Thread *write_thread = get_thread(writer);
805         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
806                 write_thread = write_thread->get_parent();
807
808         struct future_value fv = {
809                 writer->get_write_value(),
810                 writer->get_seq_number() + params->maxfuturedelay,
811                 write_thread->get_id(),
812         };
813         if (node->add_future_value(fv))
814                 set_latest_backtrack(reader);
815 }
816
817 /**
818  * Process a write ModelAction
819  * @param curr The ModelAction to process
820  * @return True if the mo_graph was updated or promises were resolved
821  */
822 bool ModelExecution::process_write(ModelAction *curr)
823 {
824         /* Readers to which we may send our future value */
825         ModelVector<ModelAction *> send_fv;
826
827         const ModelAction *earliest_promise_reader;
828         bool updated_promises = false;
829
830         bool updated_mod_order = w_modification_order(curr, &send_fv);
831         Promise *promise = pop_promise_to_resolve(curr);
832
833         if (promise) {
834                 earliest_promise_reader = promise->get_reader(0);
835                 updated_promises = resolve_promise(curr, promise);
836         } else
837                 earliest_promise_reader = NULL;
838
839         for (unsigned int i = 0; i < send_fv.size(); i++) {
840                 ModelAction *read = send_fv[i];
841
842                 /* Don't send future values to reads after the Promise we resolve */
843                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
844                         /* Check if future value can be sent immediately */
845                         if (promises_may_allow(curr, read)) {
846                                 add_future_value(curr, read);
847                         } else {
848                                 futurevalues.push_back(PendingFutureValue(curr, read));
849                         }
850                 }
851         }
852
853         /* Check the pending future values */
854         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
855                 struct PendingFutureValue pfv = futurevalues[i];
856                 if (promises_may_allow(pfv.writer, pfv.reader)) {
857                         add_future_value(pfv.writer, pfv.reader);
858                         futurevalues.erase(futurevalues.begin() + i);
859                 }
860         }
861
862         mo_graph->commitChanges();
863         mo_check_promises(curr, false);
864
865         get_thread(curr)->set_return_value(VALUE_NONE);
866         return updated_mod_order || updated_promises;
867 }
868
869 /**
870  * Process a fence ModelAction
871  * @param curr The ModelAction to process
872  * @return True if synchronization was updated
873  */
874 bool ModelExecution::process_fence(ModelAction *curr)
875 {
876         /*
877          * fence-relaxed: no-op
878  * fence-release: only log the occurrence (not in this function), for
879          *   use in later synchronization
880          * fence-acquire (this function): search for hypothetical release
881          *   sequences
882          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
883          */
884         bool updated = false;
885         if (curr->is_acquire()) {
886                 action_list_t *list = &action_trace;
887                 action_list_t::reverse_iterator rit;
888                 /* Find X : is_read(X) && X --sb-> curr */
889                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
890                         ModelAction *act = *rit;
891                         if (act == curr)
892                                 continue;
893                         if (act->get_tid() != curr->get_tid())
894                                 continue;
895                         /* Stop at the beginning of the thread */
896                         if (act->is_thread_start())
897                                 break;
898                         /* Stop once we reach a prior fence-acquire */
899                         if (act->is_fence() && act->is_acquire())
900                                 break;
901                         if (!act->is_read())
902                                 continue;
903                         /* read-acquire will find its own release sequences */
904                         if (act->is_acquire())
905                                 continue;
906
907                         /* Establish hypothetical release sequences */
908                         rel_heads_list_t release_heads;
909                         get_release_seq_heads(curr, act, &release_heads);
910                         for (unsigned int i = 0; i < release_heads.size(); i++)
911                                 synchronize(release_heads[i], curr);
912                         if (release_heads.size() != 0)
913                                 updated = true;
914                 }
915         }
916         return updated;
917 }
918
919 /**
920  * @brief Process the current action for thread-related activity
921  *
922  * Performs current-action processing for a THREAD_* ModelAction. Processing
923  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
924  * synchronization, etc.  This function is a no-op for non-THREAD actions
925  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
926  *
927  * @param curr The current action
928  * @return True if synchronization was updated or a thread completed
929  */
930 bool ModelExecution::process_thread_action(ModelAction *curr)
931 {
932         bool updated = false;
933
934         switch (curr->get_type()) {
935         case THREAD_CREATE: {
936                 thrd_t *thrd = (thrd_t *)curr->get_location();
937                 struct thread_params *params = (struct thread_params *)curr->get_value();
938                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
939                 add_thread(th);
940                 th->set_creation(curr);
941                 /* Promises can be satisfied by children */
942                 for (unsigned int i = 0; i < promises.size(); i++) {
943                         Promise *promise = promises[i];
944                         if (promise->thread_is_available(curr->get_tid()))
945                                 promise->add_thread(th->get_id());
946                 }
947                 break;
948         }
949         case THREAD_JOIN: {
950                 Thread *blocking = curr->get_thread_operand();
951                 ModelAction *act = get_last_action(blocking->get_id());
952                 synchronize(act, curr);
953                 updated = true; /* trigger rel-seq checks */
954                 break;
955         }
956         case THREAD_FINISH: {
957                 Thread *th = get_thread(curr);
958                 /* Wake up any joining threads */
959                 for (unsigned int i = 0; i < get_num_threads(); i++) {
960                         Thread *waiting = get_thread(int_to_id(i));
961                         if (waiting->waiting_on() == th &&
962                                         waiting->get_pending()->is_thread_join())
963                                 scheduler->wake(waiting);
964                 }
965                 th->complete();
966                 /* Completed thread can't satisfy promises */
967                 for (unsigned int i = 0; i < promises.size(); i++) {
968                         Promise *promise = promises[i];
969                         if (promise->thread_is_available(th->get_id()))
970                                 if (promise->eliminate_thread(th->get_id()))
971                                         priv->failed_promise = true;
972                 }
973                 updated = true; /* trigger rel-seq checks */
974                 break;
975         }
976         case THREAD_START: {
977                 check_promises(curr->get_tid(), NULL, curr->get_cv());
978                 break;
979         }
980         default:
981                 break;
982         }
983
984         return updated;
985 }
986
987 /**
988  * @brief Process the current action for release sequence fixup activity
989  *
990  * Performs model-checker release sequence fixups for the current action,
991  * forcing a single pending release sequence to break (with a given, potential
992  * "loose" write) or to complete (i.e., synchronize). If a pending release
993  * sequence forms a complete release sequence, then we must perform the fixup
994  * synchronization, mo_graph additions, etc.
995  *
996  * @param curr The current action; must be a release sequence fixup action
997  * @param work_queue The work queue to which to add work items as they are
998  * generated
999  */
1000 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1001 {
1002         const ModelAction *write = curr->get_node()->get_relseq_break();
1003         struct release_seq *sequence = pending_rel_seqs.back();
1004         pending_rel_seqs.pop_back();
1005         ASSERT(sequence);
1006         ModelAction *acquire = sequence->acquire;
1007         const ModelAction *rf = sequence->rf;
1008         const ModelAction *release = sequence->release;
1009         ASSERT(acquire);
1010         ASSERT(release);
1011         ASSERT(rf);
1012         ASSERT(release->same_thread(rf));
1013
1014         if (write == NULL) {
1015                 /**
1016                  * @todo Forcing a synchronization requires that we set
1017                  * modification order constraints. For instance, we can't allow
1018                  * a fixup sequence in which two separate read-acquire
1019                  * operations read from the same sequence, where the first one
1020                  * synchronizes and the other doesn't. Essentially, we can't
1021                  * allow any writes to insert themselves between 'release' and
1022                  * 'rf'
1023                  */
1024
1025                 /* Must synchronize */
1026                 if (!synchronize(release, acquire))
1027                         return;
1028                 /* Re-check all pending release sequences */
1029                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1030                 /* Re-check act for mo_graph edges */
1031                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1032
1033                 /* propagate synchronization to later actions */
1034                 action_list_t::reverse_iterator rit = action_trace.rbegin();
1035                 for (; (*rit) != acquire; rit++) {
1036                         ModelAction *propagate = *rit;
1037                         if (acquire->happens_before(propagate)) {
1038                                 synchronize(acquire, propagate);
1039                                 /* Re-check 'propagate' for mo_graph edges */
1040                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1041                         }
1042                 }
1043         } else {
1044                 /* Break release sequence with new edges:
1045                  *   release --mo--> write --mo--> rf */
1046                 mo_graph->addEdge(release, write);
1047                 mo_graph->addEdge(write, rf);
1048         }
1049
1050         /* See if we have realized a data race */
1051         checkDataRaces();
1052 }
1053
1054 /**
1055  * Initialize the current action by performing one or more of the following
1056  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1057  * in the NodeStack, manipulating backtracking sets, allocating and
1058  * initializing clock vectors, and computing the promises to fulfill.
1059  *
1060  * @param curr The current action, as passed from the user context; may be
1061  * freed/invalidated after the execution of this function, with a different
1062  * action "returned" in its place (pass-by-reference)
1063  * @return True if curr is a newly-explored action; false otherwise
1064  */
1065 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1066 {
1067         ModelAction *newcurr;
1068
1069         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1070                 newcurr = process_rmw(*curr);
1071                 delete *curr;
1072
1073                 if (newcurr->is_rmw())
1074                         compute_promises(newcurr);
1075
1076                 *curr = newcurr;
1077                 return false;
1078         }
1079
1080         (*curr)->set_seq_number(get_next_seq_num());
1081
1082         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1083         if (newcurr) {
1084                 /* First restore type and order in case of RMW operation */
1085                 if ((*curr)->is_rmwr())
1086                         newcurr->copy_typeandorder(*curr);
1087
1088                 ASSERT((*curr)->get_location() == newcurr->get_location());
1089                 newcurr->copy_from_new(*curr);
1090
1091                 /* Discard duplicate ModelAction; use action from NodeStack */
1092                 delete *curr;
1093
1094                 /* Always compute new clock vector */
1095                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1096
1097                 *curr = newcurr;
1098                 return false; /* Action was explored previously */
1099         } else {
1100                 newcurr = *curr;
1101
1102                 /* Always compute new clock vector */
1103                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1104
1105                 /* Assign most recent release fence */
1106                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1107
1108                 /*
1109                  * Perform one-time actions when pushing new ModelAction onto
1110                  * NodeStack
1111                  */
1112                 if (newcurr->is_write())
1113                         compute_promises(newcurr);
1114                 else if (newcurr->is_relseq_fixup())
1115                         compute_relseq_breakwrites(newcurr);
1116                 else if (newcurr->is_wait())
1117                         newcurr->get_node()->set_misc_max(2);
1118                 else if (newcurr->is_notify_one()) {
1119                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1120                 }
1121                 return true; /* This was a new ModelAction */
1122         }
1123 }
1124
1125 /**
1126  * @brief Establish reads-from relation between two actions
1127  *
1128  * Perform basic operations involved with establishing a concrete rf relation,
1129  * including setting the ModelAction data and checking for release sequences.
1130  *
1131  * @param act The action that is reading (must be a read)
1132  * @param rf The action from which we are reading (must be a write)
1133  *
1134  * @return True if this read established synchronization
1135  */
1136 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1137 {
1138         ASSERT(rf);
1139         ASSERT(rf->is_write());
1140
1141         act->set_read_from(rf);
1142         if (act->is_acquire()) {
1143                 rel_heads_list_t release_heads;
1144                 get_release_seq_heads(act, act, &release_heads);
1145                 int num_heads = release_heads.size();
1146                 for (unsigned int i = 0; i < release_heads.size(); i++)
1147                         if (!synchronize(release_heads[i], act))
1148                                 num_heads--;
1149                 return num_heads > 0;
1150         }
1151         return false;
1152 }
1153
1154 /**
1155  * @brief Synchronizes two actions
1156  *
1157  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1158  * This function performs the synchronization as well as providing other hooks
1159  * for other checks along with synchronization.
1160  *
1161  * @param first The left-hand side of the synchronizes-with relation
1162  * @param second The right-hand side of the synchronizes-with relation
1163  * @return True if the synchronization was successful (i.e., was consistent
1164  * with the execution order); false otherwise
1165  */
1166 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1167 {
1168         if (*second < *first) {
1169                 set_bad_synchronization();
1170                 return false;
1171         }
1172         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1173         return second->synchronize_with(first);
1174 }
1175
1176 /**
1177  * Check promises and eliminate potentially-satisfying threads when a thread is
1178  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1179  * no longer satisfy a promise generated from that thread.
1180  *
1181  * @param blocker The thread on which a thread is waiting
1182  * @param waiting The waiting thread
1183  */
1184 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1185 {
1186         for (unsigned int i = 0; i < promises.size(); i++) {
1187                 Promise *promise = promises[i];
1188                 if (!promise->thread_is_available(waiting->get_id()))
1189                         continue;
1190                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1191                         ModelAction *reader = promise->get_reader(j);
1192                         if (reader->get_tid() != blocker->get_id())
1193                                 continue;
1194                         if (promise->eliminate_thread(waiting->get_id())) {
1195                                 /* Promise has failed */
1196                                 priv->failed_promise = true;
1197                         } else {
1198                                 /* Only eliminate the 'waiting' thread once */
1199                                 return;
1200                         }
1201                 }
1202         }
1203 }
1204
1205 /**
1206  * @brief Check whether a model action is enabled.
1207  *
1208  * Checks whether an operation would be successful (e.g., that a lock is not
1209  * already held, or that the thread being joined has already completed).
1210  *
1211  * For yield-blocking, yields are never enabled.
1212  *
1213  * @param curr is the ModelAction to check whether it is enabled.
1214  * @return a bool that indicates whether the action is enabled.
1215  */
1216 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1217         if (curr->is_lock()) {
1218                 std::mutex *lock = curr->get_mutex();
1219                 struct std::mutex_state *state = lock->get_state();
1220                 if (state->locked)
1221                         return false;
1222         } else if (curr->is_thread_join()) {
1223                 Thread *blocking = curr->get_thread_operand();
1224                 if (!blocking->is_complete()) {
1225                         thread_blocking_check_promises(blocking, get_thread(curr));
1226                         return false;
1227                 }
1228         } else if (params->yieldblock && curr->is_yield()) {
1229                 return false;
1230         }
1231
1232         return true;
1233 }
1234
1235 /**
1236  * This is the heart of the model checker routine. It performs model-checking
1237  * actions corresponding to a given "current action." Among other processes, it
1238  * calculates reads-from relationships, updates synchronization clock vectors,
1239  * forms a memory_order constraints graph, and handles replay/backtrack
1240  * execution when running permutations of previously-observed executions.
1241  *
1242  * @param curr The current action to process
1243  * @return The ModelAction that is actually executed; may be different than
1244  * curr
1245  */
1246 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1247 {
1248         ASSERT(curr);
1249         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1250         bool newly_explored = initialize_curr_action(&curr);
1251
1252         DBG();
1253
1254         wake_up_sleeping_actions(curr);
1255
1256         /* Compute fairness information for CHESS yield algorithm */
1257         if (params->yieldon) {
1258                 curr->get_node()->update_yield(scheduler);
1259         }
1260
1261         /* Add the action to lists before any other model-checking tasks */
1262         if (!second_part_of_rmw)
1263                 add_action_to_lists(curr);
1264
1265         /* Build may_read_from set for newly-created actions */
1266         if (newly_explored && curr->is_read())
1267                 build_may_read_from(curr);
1268
1269         /* Initialize work_queue with the "current action" work */
1270         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1271         while (!work_queue.empty() && !has_asserted()) {
1272                 WorkQueueEntry work = work_queue.front();
1273                 work_queue.pop_front();
1274
1275                 switch (work.type) {
1276                 case WORK_CHECK_CURR_ACTION: {
1277                         ModelAction *act = work.action;
1278                         bool update = false; /* update this location's release seq's */
1279                         bool update_all = false; /* update all release seq's */
1280
1281                         if (process_thread_action(curr))
1282                                 update_all = true;
1283
1284                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1285                                 update = true;
1286
1287                         if (act->is_write() && process_write(act))
1288                                 update = true;
1289
1290                         if (act->is_fence() && process_fence(act))
1291                                 update_all = true;
1292
1293                         if (act->is_mutex_op() && process_mutex(act))
1294                                 update_all = true;
1295
1296                         if (act->is_relseq_fixup())
1297                                 process_relseq_fixup(curr, &work_queue);
1298
1299                         if (update_all)
1300                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1301                         else if (update)
1302                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1303                         break;
1304                 }
1305                 case WORK_CHECK_RELEASE_SEQ:
1306                         resolve_release_sequences(work.location, &work_queue);
1307                         break;
1308                 case WORK_CHECK_MO_EDGES: {
1309                         /** @todo Complete verification of work_queue */
1310                         ModelAction *act = work.action;
1311                         bool updated = false;
1312
1313                         if (act->is_read()) {
1314                                 const ModelAction *rf = act->get_reads_from();
1315                                 const Promise *promise = act->get_reads_from_promise();
1316                                 if (rf) {
1317                                         if (r_modification_order(act, rf))
1318                                                 updated = true;
1319                                 } else if (promise) {
1320                                         if (r_modification_order(act, promise))
1321                                                 updated = true;
1322                                 }
1323                         }
1324                         if (act->is_write()) {
1325                                 if (w_modification_order(act, NULL))
1326                                         updated = true;
1327                         }
1328                         mo_graph->commitChanges();
1329
1330                         if (updated)
1331                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1332                         break;
1333                 }
1334                 default:
1335                         ASSERT(false);
1336                         break;
1337                 }
1338         }
1339
1340         check_curr_backtracking(curr);
1341         set_backtracking(curr);
1342         return curr;
1343 }
1344
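/**
 * @brief Cache the current action as a backtracking point if alternatives remain
 *
 * If the parent node still has unexplored backtracking choices, or the current
 * node has remaining misc, read-from, promise, or release-sequence-break
 * alternatives, record this action via set_latest_backtrack().
 */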
1345 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1346 {
1347         Node *currnode = curr->get_node();
1348         Node *parnode = currnode->get_parent();
1349
1350         if ((parnode && !parnode->backtrack_empty()) ||
1351                          !currnode->misc_empty() ||
1352                          !currnode->read_from_empty() ||
1353                          !currnode->promise_empty() ||
1354                          !currnode->relseq_break_empty()) {
1355                 set_latest_backtrack(curr);
1356         }
1357 }
1358
1359 bool ModelExecution::promises_expired() const
1360 {
1361         for (unsigned int i = 0; i < promises.size(); i++) {
1362                 Promise *promise = promises[i];
1363                 if (promise->get_expiration() < priv->used_sequence_numbers)
1364                         return true;
1365         }
1366         return false;
1367 }
1368
1369 /**
1370  * This is the strongest feasibility check available.
1371  * @return whether the current trace (partial or complete) must be a prefix of
1372  * a feasible trace.
1373  */
1374 bool ModelExecution::isfeasibleprefix() const
1375 {
1376         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1377 }
1378
1379 /**
1380  * Print diagnostic information about an infeasible execution
1381  * @param prefix A string to prefix the output with; if NULL, then a default
1382  * message prefix will be provided
1383  */
1384 void ModelExecution::print_infeasibility(const char *prefix) const
1385 {
1386         char buf[100];
1387         char *ptr = buf;
1388         if (mo_graph->checkForCycles())
1389                 ptr += sprintf(ptr, "[mo cycle]");
1390         if (priv->failed_promise)
1391                 ptr += sprintf(ptr, "[failed promise]");
1392         if (priv->too_many_reads)
1393                 ptr += sprintf(ptr, "[too many reads]");
1394         if (priv->no_valid_reads)
1395                 ptr += sprintf(ptr, "[no valid reads-from]");
1396         if (priv->bad_synchronization)
1397                 ptr += sprintf(ptr, "[bad sw ordering]");
1398         if (promises_expired())
1399                 ptr += sprintf(ptr, "[promise expired]");
1400         if (promises.size() != 0)
1401                 ptr += sprintf(ptr, "[unresolved promise]");
1402         if (ptr != buf)
1403                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1404 }
1405
1406 /**
1407  * Returns whether the current completed trace is feasible, except for pending
1408  * release sequences.
1409  */
1410 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1411 {
1412         return !is_infeasible() && promises.size() == 0;
1413 }
1414
1415 /**
1416  * Check if the current partial trace is infeasible. Does not check any
1417  * end-of-execution flags, which might rule out the execution. Thus, this is
1418  * useful only for ruling an execution as infeasible.
1419  * @return whether the current partial trace is infeasible.
1420  */
1421 bool ModelExecution::is_infeasible() const
1422 {
1423         return mo_graph->checkForCycles() ||
1424                 priv->no_valid_reads ||
1425                 priv->failed_promise ||
1426                 priv->too_many_reads ||
1427                 priv->bad_synchronization ||
1428                 promises_expired();
1429 }
1430
1431 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1432 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1433         ModelAction *lastread = get_last_action(act->get_tid());
1434         lastread->process_rmw(act);
1435         if (act->is_rmw()) {
1436                 if (lastread->get_reads_from())
1437                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1438                 else
1439                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1440                 mo_graph->commitChanges();
1441         }
1442         return lastread;
1443 }
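
/*
 * Illustrative sketch (not part of the checker): how an RMW is split and
 * re-joined. A client operation such as
 *
 *   std::atomic<int> x;
 *   int old = x.fetch_add(1, std::memory_order_relaxed);
 *
 * is modeled as an RMWR (the read half) followed by an RMW (the write half).
 * process_rmw() converts the earlier RMWR into the combined RMW and adds an
 * RMW edge from the store it read from to the RMW, reflecting RMW atomicity:
 * the write half must immediately follow its reads-from store in
 * modification order.
 */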
1444
1445 /**
1446  * A helper function for ModelExecution::check_recency, to check if the current
1447  * thread is able to read from a different write/promise for 'params.maxreads'
1448  * steps and if that write/promise should become visible (i.e., is
1449  * ordered later in the modification order). This helps model memory liveness.
1450  *
1451  * @param curr The current action. Must be a read.
1452  * @param rf The write/promise from which we plan to read
1453  * @param other_rf The write/promise from which we may read
1454  * @return True if we were able to read from other_rf for params.maxreads steps
1455  */
1456 template <typename T, typename U>
1457 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1458 {
1459         /* Need a different write/promise */
1460         if (other_rf->equals(rf))
1461                 return false;
1462
1463         /* Only look for "newer" writes/promises */
1464         if (!mo_graph->checkReachable(rf, other_rf))
1465                 return false;
1466
1467         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1468         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1469         action_list_t::reverse_iterator rit = list->rbegin();
1470         ASSERT((*rit) == curr);
1471         /* Skip past curr */
1472         rit++;
1473
1474         /* Does this write/promise work for everyone? */
1475         for (int i = 0; i < params->maxreads; i++, rit++) {
1476                 ModelAction *act = *rit;
1477                 if (!act->may_read_from(other_rf))
1478                         return false;
1479         }
1480         return true;
1481 }
1482
1483 /**
1484  * Checks whether a thread has read from the same write or Promise too many
1485  * times without seeing the effects of a later write/Promise.
1486  *
1487  * Basic idea:
1488  * 1) there must be a different write/promise that we could read from,
1489  * 2) we must have read from the same write/promise in excess of maxreads times,
1490  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1491  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1492  *
1493  * If so, we decide that the execution is no longer feasible.
1494  *
1495  * @param curr The current action. Must be a read.
1496  * @param rf The ModelAction/Promise from which we might read.
1497  * @return True if the read should succeed; false otherwise
1498  */
1499 template <typename T>
1500 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1501 {
1502         if (!params->maxreads)
1503                 return true;
1504
1505         //NOTE: Next check is just an optimization, not strictly necessary....
1506         if (curr->get_node()->get_read_from_past_size() +
1507                         curr->get_node()->get_read_from_promise_size() <= 1)
1508                 return true;
1509
1510         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1511         int tid = id_to_int(curr->get_tid());
1512         ASSERT(tid < (int)thrd_lists->size());
1513         action_list_t *list = &(*thrd_lists)[tid];
1514         action_list_t::reverse_iterator rit = list->rbegin();
1515         ASSERT((*rit) == curr);
1516         /* Skip past curr */
1517         rit++;
1518
1519         action_list_t::reverse_iterator ritcopy = rit;
1520         /* See if we have enough reads from the same value */
1521         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1522                 if (ritcopy == list->rend())
1523                         return true;
1524                 ModelAction *act = *ritcopy;
1525                 if (!act->is_read())
1526                         return true;
1527                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1528                         return true;
1529                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1530                         return true;
1531                 if (act->get_node()->get_read_from_past_size() +
1532                                 act->get_node()->get_read_from_promise_size() <= 1)
1533                         return true;
1534         }
1535         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1536                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1537                 if (should_read_instead(curr, rf, write))
1538                         return false; /* liveness failure */
1539         }
1540         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1541                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1542                 if (should_read_instead(curr, rf, promise))
1543                         return false; /* liveness failure */
1544         }
1545         return true;
1546 }
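
/*
 * Illustrative sketch (not part of the checker): the liveness pattern that
 * check_recency() rules out when params.maxreads is set. Given
 *
 *   std::atomic<bool> flag(false);
 *   // Thread 1:
 *   flag.store(true, std::memory_order_relaxed);
 *   // Thread 2:
 *   while (!flag.load(std::memory_order_relaxed))
 *           ; // spin
 *
 * Thread 2 could, in principle, keep reading the initial 'false' forever.
 * Once it has read the same old write params.maxreads times while the newer,
 * mod-ordered-later store of 'true' was available to read, the execution is
 * declared infeasible, modeling the expectation that stores become visible
 * to loads within a finite number of steps.
 */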
1547
1548 /**
1549  * @brief Updates the mo_graph with the constraints imposed from the current
1550  * read.
1551  *
1552  * Basic idea is the following: Go through each other thread and find
1553  * the last action that happened before our read.  Two cases:
1554  *
1555  * -# The action is a write: that write must either occur before
1556  * the write we read from or be the write we read from.
1557  * -# The action is a read: the write that that action read from
1558  * must occur before the write we read from or be the same write.
1559  *
1560  * @param curr The current action. Must be a read.
1561  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1562  * @return True if modification order edges were added; false otherwise
1563  */
1564 template <typename rf_type>
1565 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1566 {
1567         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1568         unsigned int i;
1569         bool added = false;
1570         ASSERT(curr->is_read());
1571
1572         /* Last SC fence in the current thread */
1573         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1574         ModelAction *last_sc_write = NULL;
1575         if (curr->is_seqcst())
1576                 last_sc_write = get_last_seq_cst_write(curr);
1577
1578         /* Iterate over all threads */
1579         for (i = 0; i < thrd_lists->size(); i++) {
1580                 /* Last SC fence in thread i */
1581                 ModelAction *last_sc_fence_thread_local = NULL;
1582                 if (int_to_id((int)i) != curr->get_tid())
1583                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1584
1585                 /* Last SC fence in thread i, before last SC fence in current thread */
1586                 ModelAction *last_sc_fence_thread_before = NULL;
1587                 if (last_sc_fence_local)
1588                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1589
1590                 /* Iterate over actions in thread, starting from most recent */
1591                 action_list_t *list = &(*thrd_lists)[i];
1592                 action_list_t::reverse_iterator rit;
1593                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1594                         ModelAction *act = *rit;
1595
1596                         /* Skip curr */
1597                         if (act == curr)
1598                                 continue;
1599                         /* Don't want to add reflexive edges on 'rf' */
1600                         if (act->equals(rf)) {
1601                                 if (act->happens_before(curr))
1602                                         break;
1603                                 else
1604                                         continue;
1605                         }
1606
1607                         if (act->is_write()) {
1608                                 /* C++, Section 29.3 statement 5 */
1609                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1610                                                 *act < *last_sc_fence_thread_local) {
1611                                         added = mo_graph->addEdge(act, rf) || added;
1612                                         break;
1613                                 }
1614                                 /* C++, Section 29.3 statement 4 */
1615                                 else if (act->is_seqcst() && last_sc_fence_local &&
1616                                                 *act < *last_sc_fence_local) {
1617                                         added = mo_graph->addEdge(act, rf) || added;
1618                                         break;
1619                                 }
1620                                 /* C++, Section 29.3 statement 6 */
1621                                 else if (last_sc_fence_thread_before &&
1622                                                 *act < *last_sc_fence_thread_before) {
1623                                         added = mo_graph->addEdge(act, rf) || added;
1624                                         break;
1625                                 }
1626                         }
1627
1628                         /* C++, Section 29.3 statement 3 (second subpoint) */
1629                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1630                                 added = mo_graph->addEdge(act, rf) || added;
1631                                 break;
1632                         }
1633
1634                         /*
1635                          * Include at most one act per-thread that "happens
1636                          * before" curr
1637                          */
1638                         if (act->happens_before(curr)) {
1639                                 if (act->is_write()) {
1640                                         added = mo_graph->addEdge(act, rf) || added;
1641                                 } else {
1642                                         const ModelAction *prevrf = act->get_reads_from();
1643                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1644                                         if (prevrf) {
1645                                                 if (!prevrf->equals(rf))
1646                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1647                                         } else if (!prevrf_promise->equals(rf)) {
1648                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1649                                         }
1650                                 }
1651                                 break;
1652                         }
1653                 }
1654         }
1655
1656         /*
1657          * All compatible, thread-exclusive promises must be ordered after any
1658          * concrete loads from the same thread
1659          */
1660         for (unsigned int i = 0; i < promises.size(); i++)
1661                 if (promises[i]->is_compatible_exclusive(curr))
1662                         added = mo_graph->addEdge(rf, promises[i]) || added;
1663
1664         return added;
1665 }
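
/*
 * Illustrative sketch (not part of the checker): the coherence edge this
 * function adds for a read. With
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);      // W1
 *   int r = x.load(std::memory_order_relaxed);  // curr
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);      // W2
 *
 * If curr reads from W2, then W1 happens before curr (sequenced-before in
 * the same thread), so the edge W1 --mo--> W2 is added; reading a value
 * "older" than one's own prior store would violate coherence.
 */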
1666
1667 /**
1668  * Updates the mo_graph with the constraints imposed from the current write.
1669  *
1670  * Basic idea is the following: Go through each other thread and find
1671  * the latest action that happened before our write.  Two cases:
1672  *
1673  * (1) The action is a write => that write must occur before
1674  * the current write
1675  *
1676  * (2) The action is a read => the write that that action read from
1677  * must occur before the current write.
1678  *
1679  * This method also handles two other issues:
1680  *
1681  * (I) Sequential Consistency: Making sure that if the current write is
1682  * seq_cst, that it occurs after the previous seq_cst write.
1683  *
1684  * (II) Sending the write back to non-synchronizing reads.
1685  *
1686  * @param curr The current action. Must be a write.
1687  * @param send_fv A vector for stashing reads to which we may pass our future
1688  * value. If NULL, then don't record any future values.
1689  * @return True if modification order edges were added; false otherwise
1690  */
1691 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1692 {
1693         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1694         unsigned int i;
1695         bool added = false;
1696         ASSERT(curr->is_write());
1697
1698         if (curr->is_seqcst()) {
1699                 /* We have to at least see the last sequentially consistent write,
1700                          so we are initialized. */
1701                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1702                 if (last_seq_cst != NULL) {
1703                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1704                 }
1705         }
1706
1707         /* Last SC fence in the current thread */
1708         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1709
1710         /* Iterate over all threads */
1711         for (i = 0; i < thrd_lists->size(); i++) {
1712                 /* Last SC fence in thread i, before last SC fence in current thread */
1713                 ModelAction *last_sc_fence_thread_before = NULL;
1714                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1715                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1716
1717                 /* Iterate over actions in thread, starting from most recent */
1718                 action_list_t *list = &(*thrd_lists)[i];
1719                 action_list_t::reverse_iterator rit;
1720                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1721                         ModelAction *act = *rit;
1722                         if (act == curr) {
1723                                 /*
1724                                  * 1) If RMW and it actually read from something, then we
1725                                  * already have all relevant edges, so just skip to next
1726                                  * thread.
1727                                  *
1728                                  * 2) If RMW and it didn't read from anything, we should add
1729                                  * whatever edge we can get to speed up convergence.
1730                                  *
1731                                  * 3) If normal write, we need to look at earlier actions, so
1732                                  * continue processing list.
1733                                  */
1734                                 if (curr->is_rmw()) {
1735                                         if (curr->get_reads_from() != NULL)
1736                                                 break;
1737                                         else
1738                                                 continue;
1739                                 } else
1740                                         continue;
1741                         }
1742
1743                         /* C++, Section 29.3 statement 7 */
1744                         if (last_sc_fence_thread_before && act->is_write() &&
1745                                         *act < *last_sc_fence_thread_before) {
1746                                 added = mo_graph->addEdge(act, curr) || added;
1747                                 break;
1748                         }
1749
1750                         /*
1751                          * Include at most one act per-thread that "happens
1752                          * before" curr
1753                          */
1754                         if (act->happens_before(curr)) {
1755                                 /*
1756                                  * Note: if act is RMW, just add edge:
1757                                  *   act --mo--> curr
1758                                  * The following edge should be handled elsewhere:
1759                                  *   readfrom(act) --mo--> act
1760                                  */
1761                                 if (act->is_write())
1762                                         added = mo_graph->addEdge(act, curr) || added;
1763                                 else if (act->is_read()) {
1764                                         //if the previous read didn't read from any write, just keep going
1765                                         if (act->get_reads_from() == NULL)
1766                                                 continue;
1767                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1768                                 }
1769                                 break;
1770                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1771                                                      !act->same_thread(curr)) {
1772                                 /* We have an action that:
1773                                    (1) did not happen before us
1774                                    (2) is a read and we are a write
1775                                    (3) cannot synchronize with us
1776                                    (4) is in a different thread
1777                                    =>
1778                                    that read could potentially read from our write.  Note that
1779                                    these checks are overly conservative at this point; we'll
1780                                    do more checks before actually removing the
1781                                    pending future value.
1782
1783                                  */
1784                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1785                                         if (!is_infeasible())
1786                                                 send_fv->push_back(act);
1787                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1788                                                 add_future_value(curr, act);
1789                                 }
1790                         }
1791                 }
1792         }
1793
1794         /*
1795          * All compatible, thread-exclusive promises must be ordered after any
1796          * concrete stores to the same thread, or else they can be merged with
1797          * this store later
1798          */
1799         for (unsigned int i = 0; i < promises.size(); i++)
1800                 if (promises[i]->is_compatible_exclusive(curr))
1801                         added = mo_graph->addEdge(curr, promises[i]) || added;
1802
1803         return added;
1804 }
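
/*
 * Illustrative sketch (not part of the checker): the coherence edge this
 * function adds for a write. With
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);      // W1
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);  // R, reads from W1
 *   x.store(2, std::memory_order_relaxed);      // curr
 *
 * R happens before curr (sequenced-before), so the edge W1 --mo--> curr is
 * added; a store sequenced after a load may not be ordered before the store
 * that the load observed.
 */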
1805
1806 /** Arbitrary reads from the future are not allowed.  Section 29.3
1807  * part 9 places some constraints.  This method checks one result of that
1808  * constraint.  Others require compiler support. */
1809 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1810 {
1811         if (!writer->is_rmw())
1812                 return true;
1813
1814         if (!reader->is_rmw())
1815                 return true;
1816
1817         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1818                 if (search == reader)
1819                         return false;
1820                 if (search->get_tid() == reader->get_tid() &&
1821                                 search->happens_before(reader))
1822                         break;
1823         }
1824
1825         return true;
1826 }
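
/*
 * Illustrative sketch (not part of the checker): the circular RMW pattern
 * this check forbids. With
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int a = x.fetch_add(1, std::memory_order_relaxed); // RMW A
 *   // Thread 2:
 *   int b = x.fetch_add(1, std::memory_order_relaxed); // RMW B
 *
 * A and B may not supply each other's values: following the writer's
 * reads-from chain from A back to B (or vice versa) would form a cycle of
 * RMWs reading from each other out of thin air.
 */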
1827
1828 /**
1829  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1830  * some constraints. This method checks the following constraint (others
1831  * require compiler support):
1832  *
1833  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1834  */
1835 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1836 {
1837         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1838         unsigned int i;
1839         /* Iterate over all threads */
1840         for (i = 0; i < thrd_lists->size(); i++) {
1841                 const ModelAction *write_after_read = NULL;
1842
1843                 /* Iterate over actions in thread, starting from most recent */
1844                 action_list_t *list = &(*thrd_lists)[i];
1845                 action_list_t::reverse_iterator rit;
1846                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1847                         ModelAction *act = *rit;
1848
1849                         /* Don't disallow due to act == reader */
1850                         if (!reader->happens_before(act) || reader == act)
1851                                 break;
1852                         else if (act->is_write())
1853                                 write_after_read = act;
1854                         else if (act->is_read() && act->get_reads_from() != NULL)
1855                                 write_after_read = act->get_reads_from();
1856                 }
1857
1858                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1859                         return false;
1860         }
1861         return true;
1862 }
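
/*
 * Illustrative sketch (not part of the checker): the pattern rejected by
 * mo_may_allow(). With
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed); // X (reader)
 *   x.store(1, std::memory_order_relaxed);     // Y
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);     // Z (writer)
 *
 * If Y --mo--> Z is already established, then X may not read from Z: X
 * happens before Y, so reading from Z would force Z --mo--> Y, creating a
 * modification-order cycle.
 */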
1863
1864 /**
1865  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1866  * The ModelAction under consideration is expected to be taking part in
1867  * release/acquire synchronization as an object of the "reads from" relation.
1868  * Note that this can only provide release sequence support for RMW chains
1869  * which do not read from the future, as those actions cannot be traced until
1870  * their "promise" is fulfilled. Similarly, we may not even establish the
1871  * presence of a release sequence with certainty, as some modification order
1872  * constraints may be decided further in the future. Thus, this function
1873  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1874  * and a boolean representing certainty.
1875  *
1876  * @param rf The action that might be part of a release sequence. Must be a
1877  * write.
1878  * @param release_heads A pass-by-reference style return parameter. After
1879  * execution of this function, release_heads will contain the heads of all the
1880  * relevant release sequences, if any exist with certainty
1881  * @param pending A pass-by-reference style return parameter which is only used
1882  * when returning false (i.e., uncertain). Returns information regarding
1883  * an uncertain release sequence, including any write operations that might
1884  * break the sequence.
1885  * @return true, if the ModelExecution is certain that release_heads is complete;
1886  * false otherwise
1887  */
1888 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1889                 rel_heads_list_t *release_heads,
1890                 struct release_seq *pending) const
1891 {
1892         /* Only check for release sequences if there are no cycles */
1893         if (mo_graph->checkForCycles())
1894                 return false;
1895
1896         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1897                 ASSERT(rf->is_write());
1898
1899                 if (rf->is_release())
1900                         release_heads->push_back(rf);
1901                 else if (rf->get_last_fence_release())
1902                         release_heads->push_back(rf->get_last_fence_release());
1903                 if (!rf->is_rmw())
1904                         break; /* End of RMW chain */
1905
1906                 /** @todo Need to be smarter here...  In the linux lock
1907                  * example, this will run to the beginning of the program for
1908                  * every acquire. */
1909                 /** @todo The way to be smarter here is to keep going until 1
1910                  * thread has a release preceded by an acquire and you've seen
1911                  *       both. */
1912
1913                 /* acq_rel RMW is a sufficient stopping condition */
1914                 if (rf->is_acquire() && rf->is_release())
1915                         return true; /* complete */
1916         }
1917         if (!rf) {
1918                 /* read from future: need to settle this later */
1919                 pending->rf = NULL;
1920                 return false; /* incomplete */
1921         }
1922
1923         if (rf->is_release())
1924                 return true; /* complete */
1925
1926         /* else relaxed write
1927          * - check for fence-release in the same thread (29.8, stmt. 3)
1928          * - check modification order for contiguous subsequence
1929          *   -> rf must be same thread as release */
1930
1931         const ModelAction *fence_release = rf->get_last_fence_release();
1932         /* Synchronize with a fence-release unconditionally; we don't need to
1933          * find any more "contiguous subsequence..." for it */
1934         if (fence_release)
1935                 release_heads->push_back(fence_release);
1936
1937         int tid = id_to_int(rf->get_tid());
1938         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1939         action_list_t *list = &(*thrd_lists)[tid];
1940         action_list_t::const_reverse_iterator rit;
1941
1942         /* Find rf in the thread list */
1943         rit = std::find(list->rbegin(), list->rend(), rf);
1944         ASSERT(rit != list->rend());
1945
1946         /* Find the last {write,fence}-release */
1947         for (; rit != list->rend(); rit++) {
1948                 if (fence_release && *(*rit) < *fence_release)
1949                         break;
1950                 if ((*rit)->is_release())
1951                         break;
1952         }
1953         if (rit == list->rend()) {
1954                 /* No write-release in this thread */
1955                 return true; /* complete */
1956         } else if (fence_release && *(*rit) < *fence_release) {
1957                 /* The fence-release is more recent (and so, "stronger") than
1958                  * the most recent write-release */
1959                 return true; /* complete */
1960         } /* else, need to establish contiguous release sequence */
1961         ModelAction *release = *rit;
1962
1963         ASSERT(rf->same_thread(release));
1964
1965         pending->writes.clear();
1966
1967         bool certain = true;
1968         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1969                 if (id_to_int(rf->get_tid()) == (int)i)
1970                         continue;
1971                 list = &(*thrd_lists)[i];
1972
1973                 /* Can we ensure no future writes from this thread may break
1974                  * the release seq? */
1975                 bool future_ordered = false;
1976
1977                 ModelAction *last = get_last_action(int_to_id(i));
1978                 Thread *th = get_thread(int_to_id(i));
1979                 if ((last && rf->happens_before(last)) ||
1980                                 !is_enabled(th) ||
1981                                 th->is_complete())
1982                         future_ordered = true;
1983
1984                 ASSERT(!th->is_model_thread() || future_ordered);
1985
1986                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1987                         const ModelAction *act = *rit;
1988                         /* Reach synchronization -> this thread is complete */
1989                         if (act->happens_before(release))
1990                                 break;
1991                         if (rf->happens_before(act)) {
1992                                 future_ordered = true;
1993                                 continue;
1994                         }
1995
1996                         /* Only non-RMW writes can break release sequences */
1997                         if (!act->is_write() || act->is_rmw())
1998                                 continue;
1999
2000                         /* Check modification order */
2001                         if (mo_graph->checkReachable(rf, act)) {
2002                                 /* rf --mo--> act */
2003                                 future_ordered = true;
2004                                 continue;
2005                         }
2006                         if (mo_graph->checkReachable(act, release))
2007                                 /* act --mo--> release */
2008                                 break;
2009                         if (mo_graph->checkReachable(release, act) &&
2010                                       mo_graph->checkReachable(act, rf)) {
2011                                 /* release --mo-> act --mo--> rf */
2012                                 return true; /* complete */
2013                         }
2014                         /* act may break release sequence */
2015                         pending->writes.push_back(act);
2016                         certain = false;
2017                 }
2018                 if (!future_ordered)
2019                         certain = false; /* This thread is uncertain */
2020         }
2021
2022         if (certain) {
2023                 release_heads->push_back(release);
2024                 pending->writes.clear();
2025         } else {
2026                 pending->release = release;
2027                 pending->rf = rf;
2028         }
2029         return certain;
2030 }
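
/*
 * Illustrative sketch (not part of the checker): a release sequence this
 * function can certify. With
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);      // release head
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);  // RMW, extends the sequence
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);  // rf = the RMW
 *
 * If Thread 3 reads from the RMW, walking the RMW's reads-from chain reaches
 * the release store in Thread 1, so that store is reported as a release head
 * and Thread 3's acquire synchronizes with it. Certainty can be lost when a
 * relaxed, non-RMW store from another thread might still end up ordered
 * between the head and rf in modification order.
 */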
2031
2032 /**
2033  * An interface for getting the release sequence head(s) with which a
2034  * given ModelAction must synchronize. This function only returns a non-empty
2035  * result when it can locate a release sequence head with certainty. Otherwise,
2036  * it may mark the internal state of the ModelExecution so that it will handle
2037  * the release sequence at a later time, causing @a acquire to update its
2038  * synchronization at some later point in execution.
2039  *
2040  * @param acquire The 'acquire' action that may synchronize with a release
2041  * sequence
2042  * @param read The read action that may read from a release sequence; this may
2043  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2044  * when 'acquire' is a fence-acquire)
2045  * @param release_heads A pass-by-reference return parameter. Will be filled
2046  * with the head(s) of the release sequence(s), if they exist with certainty.
2047  * @see ModelExecution::release_seq_heads
2048  */
2049 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2050                 ModelAction *read, rel_heads_list_t *release_heads)
2051 {
2052         const ModelAction *rf = read->get_reads_from();
2053         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2054         sequence->acquire = acquire;
2055         sequence->read = read;
2056
2057         if (!release_seq_heads(rf, release_heads, sequence)) {
2058                 /* add act to 'lazy checking' list */
2059                 pending_rel_seqs.push_back(sequence);
2060         } else {
2061                 snapshot_free(sequence);
2062         }
2063 }
2064
2065 /**
2066  * Attempt to resolve all stashed operations that might synchronize with a
2067  * release sequence for a given location. This implements the "lazy" portion of
2068  * determining whether or not a release sequence was contiguous, since not all
2069  * modification order information is present at the time an action occurs.
2070  *
2071  * @param location The location/object that should be checked for release
2072  * sequence resolutions. A NULL value means to check all locations.
2073  * @param work_queue The work queue to which to add work items as they are
2074  * generated
2075  * @return True if any updates occurred (new synchronization, new mo_graph
2076  * edges)
2077  */
2078 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2079 {
2080         bool updated = false;
2081         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2082         while (it != pending_rel_seqs.end()) {
2083                 struct release_seq *pending = *it;
2084                 ModelAction *acquire = pending->acquire;
2085                 const ModelAction *read = pending->read;
2086
2087                 /* Only resolve sequences on the given location, if provided */
2088                 if (location && read->get_location() != location) {
2089                         it++;
2090                         continue;
2091                 }
2092
2093                 const ModelAction *rf = read->get_reads_from();
2094                 rel_heads_list_t release_heads;
2095                 bool complete;
2096                 complete = release_seq_heads(rf, &release_heads, pending);
2097                 for (unsigned int i = 0; i < release_heads.size(); i++)
2098                         if (!acquire->has_synchronized_with(release_heads[i]))
2099                                 if (synchronize(release_heads[i], acquire))
2100                                         updated = true;
2101
2102                 if (updated) {
2103                         /* Re-check all pending release sequences */
2104                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2105                         /* Re-check read-acquire for mo_graph edges */
2106                         if (acquire->is_read())
2107                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2108
2109                         /* propagate synchronization to later actions */
2110                         action_list_t::reverse_iterator rit = action_trace.rbegin();
2111                         for (; (*rit) != acquire; rit++) {
2112                                 ModelAction *propagate = *rit;
2113                                 if (acquire->happens_before(propagate)) {
2114                                         synchronize(acquire, propagate);
2115                                         /* Re-check 'propagate' for mo_graph edges */
2116                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2117                                 }
2118                         }
2119                 }
2120                 if (complete) {
2121                         it = pending_rel_seqs.erase(it);
2122                         snapshot_free(pending);
2123                 } else {
2124                         it++;
2125                 }
2126         }
2127
2128         // If we resolved promises, check whether we have now realized a data race.
2129         checkDataRaces();
2130
2131         return updated;
2132 }
2133
2134 /**
2135  * Performs various bookkeeping operations for the current ModelAction. For
2136  * instance, adds action to the per-object, per-thread action vector and to the
2137  * action trace list of all thread actions.
2138  *
2139  * @param act is the ModelAction to add.
2140  */
2141 void ModelExecution::add_action_to_lists(ModelAction *act)
2142 {
2143         int tid = id_to_int(act->get_tid());
2144         ModelAction *uninit = NULL;
2145         int uninit_id = -1;
2146         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2147         if (list->empty() && act->is_atomic_var()) {
2148                 uninit = get_uninitialized_action(act);
2149                 uninit_id = id_to_int(uninit->get_tid());
2150                 list->push_front(uninit);
2151         }
2152         list->push_back(act);
2153
2154         action_trace.push_back(act);
2155         if (uninit)
2156                 action_trace.push_front(uninit);
2157
2158         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2159         if (tid >= (int)vec->size())
2160                 vec->resize(priv->next_thread_id);
2161         (*vec)[tid].push_back(act);
2162         if (uninit)
2163                 (*vec)[uninit_id].push_front(uninit);
2164
2165         if ((int)thrd_last_action.size() <= tid)
2166                 thrd_last_action.resize(get_num_threads());
2167         thrd_last_action[tid] = act;
2168         if (uninit)
2169                 thrd_last_action[uninit_id] = uninit;
2170
2171         if (act->is_fence() && act->is_release()) {
2172                 if ((int)thrd_last_fence_release.size() <= tid)
2173                         thrd_last_fence_release.resize(get_num_threads());
2174                 thrd_last_fence_release[tid] = act;
2175         }
2176
2177         if (act->is_wait()) {
2178                 void *mutex_loc = (void *) act->get_value();
2179                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2180
2181                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2182                 if (tid >= (int)vec->size())
2183                         vec->resize(priv->next_thread_id);
2184                 (*vec)[tid].push_back(act);
2185         }
2186 }
2187
2188 /**
2189  * @brief Get the last action performed by a particular Thread
2190  * @param tid The thread ID of the Thread in question
2191  * @return The last action in the thread
2192  */
2193 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2194 {
2195         int threadid = id_to_int(tid);
2196         if (threadid < (int)thrd_last_action.size())
2197                 return thrd_last_action[id_to_int(tid)];
2198         else
2199                 return NULL;
2200 }
2201
2202 /**
2203  * @brief Get the last fence release performed by a particular Thread
2204  * @param tid The thread ID of the Thread in question
2205  * @return The last fence release in the thread, if one exists; NULL otherwise
2206  */
2207 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2208 {
2209         int threadid = id_to_int(tid);
2210         if (threadid < (int)thrd_last_fence_release.size())
2211                 return thrd_last_fence_release[id_to_int(tid)];
2212         else
2213                 return NULL;
2214 }
2215
2216 /**
2217  * Gets the last memory_order_seq_cst write (in the total global sequence)
2218  * performed on a particular object (i.e., memory location), not including the
2219  * current action.
2220  * @param curr The current ModelAction; also denotes the object location to
2221  * check
2222  * @return The last seq_cst write
2223  */
2224 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2225 {
2226         void *location = curr->get_location();
2227         action_list_t *list = obj_map.get(location);
2228         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2229         action_list_t::reverse_iterator rit;
2230         for (rit = list->rbegin(); (*rit) != curr; rit++)
2231                 ;
2232         rit++; /* Skip past curr */
2233         for ( ; rit != list->rend(); rit++)
2234                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2235                         return *rit;
2236         return NULL;
2237 }
2238
2239 /**
2240  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2241  * performed in a particular thread, prior to a particular fence.
2242  * @param tid The ID of the thread to check
2243  * @param before_fence The fence from which to begin the search; if NULL, then
2244  * search for the most recent fence in the thread.
2245  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2246  */
2247 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2248 {
2249         /* All fences should have location FENCE_LOCATION */
2250         action_list_t *list = obj_map.get(FENCE_LOCATION);
2251
2252         if (!list)
2253                 return NULL;
2254
2255         action_list_t::reverse_iterator rit = list->rbegin();
2256
2257         if (before_fence) {
2258                 for (; rit != list->rend(); rit++)
2259                         if (*rit == before_fence)
2260                                 break;
2261
2262                 ASSERT(*rit == before_fence);
2263                 rit++;
2264         }
2265
2266         for (; rit != list->rend(); rit++)
2267                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2268                         return *rit;
2269         return NULL;
2270 }
2271
2272 /**
2273  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2274  * location). This function identifies the mutex according to the current
2275  * action, which is presumed to operate on the same mutex.
2276  * @param curr The current ModelAction; also denotes the object location to
2277  * check
2278  * @return The last unlock operation
2279  */
2280 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2281 {
2282         void *location = curr->get_location();
2283         action_list_t *list = obj_map.get(location);
2284         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2285         action_list_t::reverse_iterator rit;
2286         for (rit = list->rbegin(); rit != list->rend(); rit++)
2287                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2288                         return *rit;
2289         return NULL;
2290 }
2291
2292 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2293 {
2294         ModelAction *parent = get_last_action(tid);
2295         if (!parent)
2296                 parent = get_thread(tid)->get_creation();
2297         return parent;
2298 }
2299
2300 /**
2301  * Returns the clock vector for a given thread.
2302  * @param tid The thread whose clock vector we want
2303  * @return Desired clock vector
2304  */
2305 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2306 {
2307         return get_parent_action(tid)->get_cv();
2308 }
2309
2310 /**
2311  * @brief Find the promise (if any) to resolve for the current action and
2312  * remove it from the pending promise vector
2313  * @param curr The current ModelAction. Should be a write.
2314  * @return The Promise to resolve, if any; otherwise NULL
2315  */
2316 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2317 {
2318         for (unsigned int i = 0; i < promises.size(); i++)
2319                 if (curr->get_node()->get_promise(i)) {
2320                         Promise *ret = promises[i];
2321                         promises.erase(promises.begin() + i);
2322                         return ret;
2323                 }
2324         return NULL;
2325 }
2326
2327 /**
2328  * Resolve a Promise with a current write.
2329  * @param write The ModelAction that is fulfilling Promises
2330  * @param promise The Promise to resolve
2331  * @return True if the Promise was successfully resolved; false otherwise
2332  */
2333 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2334 {
2335         ModelVector<ModelAction *> actions_to_check;
2336
2337         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2338                 ModelAction *read = promise->get_reader(i);
2339                 read_from(read, write);
2340                 actions_to_check.push_back(read);
2341         }
2342         /* Make sure the promise's value matches the write's value */
2343         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2344         if (!mo_graph->resolvePromise(promise, write))
2345                 priv->failed_promise = true;
2346
2347         /**
2348          * @todo  It is possible to end up in an inconsistent state, where a
2349          * "resolved" promise may still be referenced if
2350          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2351          *
2352          * Note that the inconsistency only matters when dumping mo_graph to
2353          * file.
2354          *
2355          * delete promise;
2356          */
2357
2358         //Check whether reading these writes has made threads unable to
2359         //resolve promises
2360         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2361                 ModelAction *read = actions_to_check[i];
2362                 mo_check_promises(read, true);
2363         }
2364
2365         return true;
2366 }
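
/*
 * Illustrative sketch (not part of the checker): the scenario a Promise
 * models. With
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed); // speculatively returns 42
 *   // Thread 2:
 *   x.store(42, std::memory_order_relaxed);    // appears later in the trace
 *
 * Thread 1's load may be explored with the future value 42 before any such
 * store exists; the outstanding obligation is recorded as a Promise. When
 * Thread 2's store is processed, resolve_promise() binds the reader(s) to it
 * and asks the CycleGraph to resolve the Promise against the write; if that
 * fails, the execution is flagged as having a failed promise.
 */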
2367
2368 /**
2369  * Compute the set of promises that could potentially be satisfied by this
2370  * action. Note that the set computation actually appears in the Node, not in
2371  * ModelExecution.
2372  * @param curr The ModelAction that may satisfy promises
2373  */
2374 void ModelExecution::compute_promises(ModelAction *curr)
2375 {
2376         for (unsigned int i = 0; i < promises.size(); i++) {
2377                 Promise *promise = promises[i];
2378                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2379                         continue;
2380
2381                 bool satisfy = true;
2382                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2383                         const ModelAction *act = promise->get_reader(j);
2384                         if (act->happens_before(curr) ||
2385                                         act->could_synchronize_with(curr)) {
2386                                 satisfy = false;
2387                                 break;
2388                         }
2389                 }
2390                 if (satisfy)
2391                         curr->get_node()->set_promise(i);
2392         }
2393 }
2394
2395 /** Checks promises in response to a change in a thread's ClockVector. */
2396 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2397 {
2398         for (unsigned int i = 0; i < promises.size(); i++) {
2399                 Promise *promise = promises[i];
2400                 if (!promise->thread_is_available(tid))
2401                         continue;
2402                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2403                         const ModelAction *act = promise->get_reader(j);
2404                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2405                                         merge_cv->synchronized_since(act)) {
2406                                 if (promise->eliminate_thread(tid)) {
2407                                         /* Promise has failed */
2408                                         priv->failed_promise = true;
2409                                         return;
2410                                 }
2411                         }
2412                 }
2413         }
2414 }
2415
2416 void ModelExecution::check_promises_thread_disabled()
2417 {
2418         for (unsigned int i = 0; i < promises.size(); i++) {
2419                 Promise *promise = promises[i];
2420                 if (promise->has_failed()) {
2421                         priv->failed_promise = true;
2422                         return;
2423                 }
2424         }
2425 }
2426
2427 /**
2428  * @brief Checks promises in response to addition to modification order for
2429  * threads.
2430  *
2431  * We test whether threads are still available for satisfying promises after an
2432  * addition to our modification order constraints. Those that are unavailable
2433  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2434  * that promise has failed.
2435  *
2436  * @param act The ModelAction which updated the modification order
2437  * @param is_read_check Should be true if act is a read and we must check for
2438  * updates to the store from which it read (there is a distinction here for
2439  * RMW's, which are both a load and a store)
2440  */
2441 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2442 {
2443         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2444
2445         for (unsigned int i = 0; i < promises.size(); i++) {
2446                 Promise *promise = promises[i];
2447
2448                 // Is this promise on the same location?
2449                 if (!promise->same_location(write))
2450                         continue;
2451
2452                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2453                         const ModelAction *pread = promise->get_reader(j);
2454                         if (!pread->happens_before(act))
2455                                continue;
2456                         if (mo_graph->checkPromise(write, promise)) {
2457                                 priv->failed_promise = true;
2458                                 return;
2459                         }
2460                         break;
2461                 }
2462
2463                 // Don't do any lookups twice for the same thread
2464                 if (!promise->thread_is_available(act->get_tid()))
2465                         continue;
2466
2467                 if (mo_graph->checkReachable(promise, write)) {
2468                         if (mo_graph->checkPromise(write, promise)) {
2469                                 priv->failed_promise = true;
2470                                 return;
2471                         }
2472                 }
2473         }
2474 }
2475
2476 /**
2477  * Compute the set of writes that may break the current pending release
2478  * sequence. This information is extracted from previous release sequence
2479  * calculations.
2480  *
2481  * @param curr The current ModelAction. Must be a release sequence fixup
2482  * action.
2483  */
2484 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2485 {
2486         if (pending_rel_seqs.empty())
2487                 return;
2488
2489         struct release_seq *pending = pending_rel_seqs.back();
2490         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2491                 const ModelAction *write = pending->writes[i];
2492                 curr->get_node()->add_relseq_break(write);
2493         }
2494
2495         /* NULL means don't break the sequence; just synchronize */
2496         curr->get_node()->add_relseq_break(NULL);
2497 }
2498
2499 /**
2500  * Build up an initial set of all past writes that this 'read' action may read
2501  * from, as well as any previously-observed future values that must still be valid.
2502  *
2503  * @param curr is the current ModelAction that we are exploring; it must be a
2504  * 'read' operation.
2505  */
2506 void ModelExecution::build_may_read_from(ModelAction *curr)
2507 {
2508         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2509         unsigned int i;
2510         ASSERT(curr->is_read());
2511
2512         ModelAction *last_sc_write = NULL;
2513
2514         if (curr->is_seqcst())
2515                 last_sc_write = get_last_seq_cst_write(curr);
2516
2517         /* Iterate over all threads */
2518         for (i = 0; i < thrd_lists->size(); i++) {
2519                 /* Iterate over actions in thread, starting from most recent */
2520                 action_list_t *list = &(*thrd_lists)[i];
2521                 action_list_t::reverse_iterator rit;
2522                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2523                         ModelAction *act = *rit;
2524
2525                         /* Only consider 'write' actions */
2526                         if (!act->is_write() || act == curr)
2527                                 continue;
2528
2529                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2530                         bool allow_read = true;
2531
2532                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2533                                 allow_read = false;
2534                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2535                                 allow_read = false;
2536
2537                         if (allow_read) {
2538                                 /* Only add feasible reads */
2539                                 mo_graph->startChanges();
2540                                 r_modification_order(curr, act);
2541                                 if (!is_infeasible())
2542                                         curr->get_node()->add_read_from_past(act);
2543                                 mo_graph->rollbackChanges();
2544                         }
2545
2546                         /* Include at most one act per-thread that "happens before" curr */
2547                         if (act->happens_before(curr))
2548                                 break;
2549                 }
2550         }
2551
2552         /* Inherit existing, promised future values */
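        /* A Promise stands in for a write that has not yet been explored but
         * whose (future) value some reader has already observed; curr may also
         * read that promised value if it references the same variable. */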
2553         for (i = 0; i < promises.size(); i++) {
2554                 const Promise *promise = promises[i];
2555                 const ModelAction *promise_read = promise->get_reader(0);
2556                 if (promise_read->same_var(curr)) {
2557                         /* Only add feasible future-values */
2558                         mo_graph->startChanges();
2559                         r_modification_order(curr, promise);
2560                         if (!is_infeasible())
2561                                 curr->get_node()->add_read_from_promise(promise_read);
2562                         mo_graph->rollbackChanges();
2563                 }
2564         }
2565
2566         /* We may find no valid may-read-from only if the execution is doomed */
2567         if (!curr->get_node()->read_from_size()) {
2568                 priv->no_valid_reads = true;
2569                 set_assert();
2570         }
2571
2572         if (DBG_ENABLED()) {
2573                 model_print("Reached read action:\n");
2574                 curr->print();
2575                 model_print("Printing read_from_past\n");
2576                 curr->get_node()->print_read_from_past();
2577                 model_print("End printing read_from_past\n");
2578         }
2579 }
2580
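/**
 * @brief Determine whether a sleeping thread's read may read from a given write
 *
 * Walks backward along the chain of RMW reads-from edges beginning at @a
 * write. The read is allowed if the chain reaches the uninitialized action,
 * exhausts itself, or contains a release write that was executed while the
 * reading thread was on the sleep set; it is disallowed once a plain
 * (non-RMW) write is reached without satisfying either condition.
 *
 * @param curr The read action issued by the (potentially sleeping) thread
 * @param write The candidate write to read from
 * @return True if the sleeping thread may read from @a write
 */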
2581 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2582 {
2583         for ( ; write != NULL; write = write->get_reads_from()) {
2584                 /* UNINIT actions don't have a Node, and they never sleep */
2585                 if (write->is_uninitialized())
2586                         return true;
2587                 Node *prevnode = write->get_node()->get_parent();
2588
2589                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2590                 if (write->is_release() && thread_sleep)
2591                         return true;
2592                 if (!write->is_rmw())
2593                         return false;
2594         }
2595         return true;
2596 }
2597
2598 /**
2599  * @brief Get an action representing an uninitialized atomic
2600  *
2601  * This function retrieves an existing UNINIT action from the NodeStack's current Node or, if none exists, creates one and records it there
2602  *
2603  * @param curr The current action, which prompts the creation of an UNINIT action
2604  * @return A pointer to the UNINIT ModelAction
2605  */
2606 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2607 {
2608         Node *node = curr->get_node();
2609         ModelAction *act = node->get_uninit_action();
2610         if (!act) {
2611                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2612                 node->set_uninit_action(act);
2613         }
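        /* Give the UNINIT action a clock vector with no parent, since nothing
         * happens before it */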
2614         act->create_cv(NULL);
2615         return act;
2616 }
2617
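/** @brief Print each ModelAction in a list, followed by a simple hash of the trace */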
2618 static void print_list(const action_list_t *list)
2619 {
2620         action_list_t::const_iterator it;
2621
2622         model_print("---------------------------------------------------------------------\n");
2623
2624         unsigned int hash = 0;
2625
2626         for (it = list->begin(); it != list->end(); it++) {
2627                 const ModelAction *act = *it;
2628                 if (act->get_seq_number() > 0)
2629                         act->print();
2630                 hash = hash^(hash<<3)^((*it)->hash());
2631         }
2632         model_print("HASH %u\n", hash);
2633         model_print("---------------------------------------------------------------------\n");
2634 }
2635
2636 #if SUPPORT_MOD_ORDER_DUMP
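/**
 * @brief Dump the modification order graph, reads-from edges, and per-thread
 * sequenced-before edges to a Graphviz ".dot" file
 * @param filename Base name for the output file (".dot" is appended)
 */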
2637 void ModelExecution::dumpGraph(char *filename) const
2638 {
2639         char buffer[200];
2640         sprintf(buffer, "%s.dot", filename);
2641         FILE *file = fopen(buffer, "w");
2642         fprintf(file, "digraph %s {\n", filename);
2643         mo_graph->dumpNodes(file);
2644         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2645
2646         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2647                 ModelAction *act = *it;
2648                 if (act->is_read()) {
2649                         mo_graph->dot_print_node(file, act);
2650                         if (act->get_reads_from())
2651                                 mo_graph->dot_print_edge(file,
2652                                                 act->get_reads_from(),
2653                                                 act,
2654                                                 "label=\"rf\", color=red, weight=2");
2655                         else
2656                                 mo_graph->dot_print_edge(file,
2657                                                 act->get_reads_from_promise(),
2658                                                 act,
2659                                                 "label=\"rf\", color=red");
2660                 }
2661                 if (thread_array[id_to_int(act->get_tid())]) {
2662                         mo_graph->dot_print_edge(file,
2663                                         thread_array[id_to_int(act->get_tid())],
2664                                         act,
2665                                         "label=\"sb\", color=blue, weight=400");
2666                 }
2667
2668                 thread_array[id_to_int(act->get_tid())] = act;
2669         }
2670         fprintf(file, "}\n");
2671         model_free(thread_array);
2672         fclose(file);
2673 }
2674 #endif
2675
2676 /** @brief Prints an execution trace summary. */
2677 void ModelExecution::print_summary() const
2678 {
2679 #if SUPPORT_MOD_ORDER_DUMP
2680         char buffername[100];
2681         sprintf(buffername, "exec%04d", get_execution_number());
2682         mo_graph->dumpGraphToFile(buffername);
2683         sprintf(buffername, "graph%04d", get_execution_number());
2684         dumpGraph(buffername);
2685 #endif
2686
2687         model_print("Execution %d:", get_execution_number());
2688         if (isfeasibleprefix()) {
2689                 if (is_yieldblocked())
2690                         model_print(" YIELD BLOCKED");
2691                 if (scheduler->all_threads_sleeping())
2692                         model_print(" SLEEP-SET REDUNDANT");
2693                 model_print("\n");
2694         } else
2695                 print_infeasibility(" INFEASIBLE");
2696         print_list(&action_trace);
2697         model_print("\n");
2698         if (!promises.empty()) {
2699                 model_print("Pending promises:\n");
2700                 for (unsigned int i = 0; i < promises.size(); i++) {
2701                         model_print(" [P%u] ", i);
2702                         promises[i]->print();
2703                 }
2704                 model_print("\n");
2705         }
2706 }
2707
2708 /**
2709  * Add a Thread to the system for the first time. Should only be called once
2710  * per thread.
2711  * @param t The Thread to add
2712  */
2713 void ModelExecution::add_thread(Thread *t)
2714 {
2715         unsigned int i = id_to_int(t->get_id());
2716         if (i >= thread_map.size())
2717                 thread_map.resize(i + 1);
2718         thread_map[i] = t;
2719         if (!t->is_model_thread())
2720                 scheduler->add_thread(t);
2721 }
2722
2723 /**
2724  * @brief Get a Thread reference by its ID
2725  * @param tid The Thread's ID
2726  * @return A Thread reference
2727  */
2728 Thread * ModelExecution::get_thread(thread_id_t tid) const
2729 {
2730         unsigned int i = id_to_int(tid);
2731         if (i < thread_map.size())
2732                 return thread_map[i];
2733         return NULL;
2734 }
2735
2736 /**
2737  * @brief Get a reference to the Thread in which a ModelAction was executed
2738  * @param act The ModelAction
2739  * @return A Thread reference
2740  */
2741 Thread * ModelExecution::get_thread(const ModelAction *act) const
2742 {
2743         return get_thread(act->get_tid());
2744 }
2745
2746 /**
2747  * @brief Get a Promise's "promise number"
2748  *
2749  * A "promise number" is an index number that is unique to a promise, valid
2750  * only for a specific snapshot of an execution trace. Promises may come and go
2751  * as they are generated and resolved, so an index only retains meaning for the
2752  * current snapshot.
2753  *
2754  * @param promise The Promise to check
2755  * @return The promise index, if the promise still is valid; otherwise -1
2756  */
2757 int ModelExecution::get_promise_number(const Promise *promise) const
2758 {
2759         for (unsigned int i = 0; i < promises.size(); i++)
2760                 if (promises[i] == promise)
2761                         return i;
2762         /* Not found */
2763         return -1;
2764 }
2765
2766 /**
2767  * @brief Check if a Thread is currently enabled
2768  * @param t The Thread to check
2769  * @return True if the Thread is currently enabled
2770  */
2771 bool ModelExecution::is_enabled(Thread *t) const
2772 {
2773         return scheduler->is_enabled(t);
2774 }
2775
2776 /**
2777  * @brief Check if a Thread is currently enabled
2778  * @param tid The ID of the Thread to check
2779  * @return True if the Thread is currently enabled
2780  */
2781 bool ModelExecution::is_enabled(thread_id_t tid) const
2782 {
2783         return scheduler->is_enabled(tid);
2784 }
2785
2786 /**
2787  * @brief Select the next thread to execute based on the current action
2788  *
2789  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
2790  * actions should be followed by the execution of their child thread. In either
2791  * case, the current action should determine the next thread schedule.
2792  *
2793  * @param curr The current action
2794  * @return The next thread to run, if the current action will determine this
2795  * selection; otherwise NULL
2796  */
2797 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2798 {
2799         /* Do not split atomic RMW */
2800         if (curr->is_rmwr())
2801                 return get_thread(curr);
2802         /* Follow CREATE with the created thread */
2803         if (curr->get_type() == THREAD_CREATE)
2804                 return curr->get_thread_operand();
2805         return NULL;
2806 }
2807
2808 /** @return True if the execution has taken too many steps */
2809 bool ModelExecution::too_many_steps() const
2810 {
2811         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2812 }
2813
2814 /**
2815  * Takes the next step in the execution, if possible.
2816  * @param curr The current step to take
2817  * @return The next Thread to run, if any; NULL if this execution
2818  * should terminate
2819  */
2820 Thread * ModelExecution::take_step(ModelAction *curr)
2821 {
2822         Thread *curr_thrd = get_thread(curr);
2823         ASSERT(curr_thrd->get_state() == THREAD_READY);
2824
2825         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2826         curr = check_current_action(curr);
2827         ASSERT(curr);
2828
2829         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2830                 scheduler->remove_thread(curr_thrd);
2831
2832         return action_select_next_thread(curr);
2833 }
2834
2835 /**
2836  * Launch end-of-execution release sequence fixups only when
2837  * the execution is otherwise feasible AND:
2838  *
2839  * (1) there are pending release sequences
2840  * (2) there are pending assertions that could be invalidated by a change
2841  *     in clock vectors (i.e., data races)
2842  * (3) there are no pending promises
2843  */
2844 void ModelExecution::fixup_release_sequences()
2845 {
2846         while (!pending_rel_seqs.empty() &&
2847                         is_feasible_prefix_ignore_relseq() &&
2848                         haveUnrealizedRaces()) {
2849                 model_print("*** WARNING: release sequence fixup action "
2850                                 "(%zu pending release sequence(s)) ***\n",
2851                                 pending_rel_seqs.size());
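                /* Issue a synthetic action from the model thread; processing it
                 * will either break the pending release sequence at one of its
                 * candidate writes or complete the synchronization */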
2852                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2853                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2854                                 model_thread);
2855                 take_step(fixup);
2856         }
2857 }