887e2efa03df0f31832ff6ee260a1a8b1ed9794d
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #include <pthread.h>
21
22 #define INITIAL_THREAD_ID       0
23
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	/** Frees all bug_message objects accumulated during the execution */
	~model_snapshot_members() {
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to hand out to the next Thread created (also serves as thread count) */
	unsigned int next_thread_id;
	/** @brief Most recent sequence number handed out to a ModelAction */
	modelclock_t used_sequence_numbers;
	/** @brief Cached most-recent backtracking point; NULL when none pending */
	ModelAction *next_backtrack;
	/** @brief Bug reports for this execution; entries are owned and freed in dtor */
	SnapVector<bug_message *> bugs;
	/** @brief Set when a promise could not be kept (see process_read) */
	bool failed_promise;
	bool hard_failed_promise;
	/** @brief Set when a read exceeds the recency (too-many-reads) bound */
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief Set when a bad seq-cst read is flagged (see set_bad_sc_read) */
	bool bad_sc_read;
	/** @brief Set when a trace assertion fires; halts this execution */
	bool asserted;

	SNAPSHOTALLOC
};
64
/** @brief Constructor
 *
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (not owned)
 * @param scheduler The scheduler used to pick the next thread (not owned)
 * @param node_stack The exploration tree shared across executions (not owned)
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());	// L: Create model thread
	add_thread(model_thread);			// L: Add model thread to scheduler
	/* Let the scheduler and node stack call back into this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
93
94 /** @brief Destructor */
95 ModelExecution::~ModelExecution()
96 {
97         for (unsigned int i = 0; i < get_num_threads(); i++)
98                 delete get_thread(int_to_id(i));
99
100         for (unsigned int i = 0; i < promises.size(); i++)
101                 delete promises[i];
102
103         delete mo_graph;
104         delete priv;
105 }
106
/** @return the sequence number of this execution, as tracked by the owning ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
111
112 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
113 {
114         action_list_t *tmp = hash->get(ptr);
115         if (tmp == NULL) {
116                 tmp = new action_list_t();
117                 hash->put(ptr, tmp);
118         }
119         return tmp;
120 }
121
122 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
123 {
124         SnapVector<action_list_t> *tmp = hash->get(ptr);
125         if (tmp == NULL) {
126                 tmp = new SnapVector<action_list_t>();
127                 hash->put(ptr, tmp);
128         }
129         return tmp;
130 }
131
132 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
133 {
134         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
135         if (wrv==NULL)
136                 return NULL;
137         unsigned int thread=id_to_int(tid);
138         if (thread < wrv->size())
139                 return &(*wrv)[thread];
140         else
141                 return NULL;
142 }
143
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
	/* IDs are handed out sequentially, starting at INITIAL_THREAD_ID */
	return priv->next_thread_id++;
}
149
/** @return the number of user threads created during this execution
 *
 * NOTE(review): next_thread_id also counts the model-checker thread
 * allocated in the constructor, so this includes that thread too. */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}
155
/** @return a sequence number for a new ModelAction */
modelclock_t ModelExecution::get_next_seq_num()
{
	/* Pre-increment: the first action gets sequence number 1, not 0 */
	return ++priv->used_sequence_numbers;
}
161
162 /**
163  * @brief Should the current action wake up a given thread?
164  *
165  * @param curr The current action
166  * @param thread The thread that we might wake up
167  * @return True, if we should wake up the sleeping thread; false otherwise
168  */
169 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
170 {
171         const ModelAction *asleep = thread->get_pending();
172         /* Don't allow partial RMW to wake anyone up */
173         if (curr->is_rmwr())
174                 return false;
175         /* Synchronizing actions may have been backtracked */
176         if (asleep->could_synchronize_with(curr))
177                 return true;
178         /* All acquire/release fences and fence-acquire/store-release */
179         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
180                 return true;
181         /* Fence-release + store can awake load-acquire on the same location */
182         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
183                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
184                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
185                         return true;
186         }
187         return false;
188 }
189
190 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
191 {
192         for (unsigned int i = 0; i < get_num_threads(); i++) {
193                 Thread *thr = get_thread(int_to_id(i));
194                 if (scheduler->is_sleep_set(thr)) {
195                         if (should_wake_up(curr, thr))
196                                 /* Remove this thread from sleep set */
197                                 scheduler->remove_sleep(thr);
198                 }
199         }
200 }
201
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
208
/** @brief Alert the model-checker that a bad seq-cst read was made
 * (the flag is snapshotted in model_snapshot_members) */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
215
216 bool ModelExecution::assert_bug(const char *msg)
217 {
218         priv->bugs.push_back(new bug_message(msg));
219
220         if (isfeasibleprefix()) {
221                 set_assert();
222                 return true;
223         }
224         return false;
225 }
226
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}
232
/** @return the list of bug reports for this execution (owned by this object) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
237
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
248
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
258
259 /**
260  * Check if we are in a deadlock. Should only be called at the end of an
261  * execution, although it should not give false positives in the middle of an
262  * execution (there should be some ENABLED thread).
263  *
264  * @return True if program is in a deadlock; false otherwise
265  */
266 bool ModelExecution::is_deadlocked() const
267 {
268         bool blocking_threads = false;
269         for (unsigned int i = 0; i < get_num_threads(); i++) {
270                 thread_id_t tid = int_to_id(i);
271                 if (is_enabled(tid))
272                         return false;
273                 Thread *t = get_thread(tid);
274                 if (!t->is_model_thread() && t->get_pending())
275                         blocking_threads = true;
276         }
277         return blocking_threads;
278 }
279
280 /**
281  * @brief Check if we are yield-blocked
282  *
283  * A program can be "yield-blocked" if all threads are ready to execute a
284  * yield.
285  *
286  * @return True if the program is yield-blocked; false otherwise
287  */
288 bool ModelExecution::is_yieldblocked() const
289 {
290         if (!params->yieldblock)
291                 return false;
292
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *t = get_thread(tid);
296                 if (t->get_pending() && t->get_pending()->is_yield())
297                         return true;
298         }
299         return false;
300 }
301
302 /**
303  * Check if this is a complete execution. That is, have all thread completed
304  * execution (rather than exiting because sleep sets have forced a redundant
305  * execution).
306  *
307  * @return True if the execution is complete.
308  */
309 bool ModelExecution::is_complete_execution() const
310 {
311         if (is_yieldblocked())
312                 return false;
313         for (unsigned int i = 0; i < get_num_threads(); i++)
314                 if (is_enabled(int_to_id(i)))
315                         return false;
316         return true;
317 }
318
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* One slot per thread: the latest acquire-fence and the latest plain
	 * load of act's variable seen (in reverse order) for that thread */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	/* Continue the reverse scan from the release found above */
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Actions in act's own thread cannot be conflicts */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire-fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Of the threads that have both an acquire-fence and a prior load of
	 * act's variable, pick the latest such fence as the backtrack point */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
391
392 /**
393  * @brief Find the last backtracking conflict for a ModelAction
394  *
395  * This function performs the search for the most recent conflicting action
396  * against which we should perform backtracking. This primary includes pairs of
397  * synchronizing actions which should be explored in the opposite execution
398  * order.
399  *
400  * @param act The current action
401  * @return The most recent action which conflicts with act
402  */
403 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
404 {
405         switch (act->get_type()) {
406         case ATOMIC_FENCE:
407                 /* Only seq-cst fences can (directly) cause backtracking */
408                 if (!act->is_seqcst())
409                         break;
410         case ATOMIC_READ:
411         case ATOMIC_WRITE:
412         case ATOMIC_RMW: {
413                 ModelAction *ret = NULL;
414
415                 /* linear search: from most recent to oldest */
416                 action_list_t *list = obj_map.get(act->get_location());
417                 action_list_t::reverse_iterator rit;
418                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
419                         ModelAction *prev = *rit;
420                         if (prev == act)
421                                 continue;
422                         if (prev->could_synchronize_with(act)) {
423                                 ret = prev;
424                                 break;
425                         }
426                 }
427
428                 ModelAction *ret2 = get_last_fence_conflict(act);
429                 if (!ret2)
430                         return ret;
431                 if (!ret)
432                         return ret2;
433                 if (*ret < *ret2)
434                         return ret2;
435                 return ret;
436         }
437         case ATOMIC_LOCK:
438         case ATOMIC_TRYLOCK: {
439                 /* linear search: from most recent to oldest */
440                 action_list_t *list = obj_map.get(act->get_location());
441                 action_list_t::reverse_iterator rit;
442                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
443                         ModelAction *prev = *rit;
444                         if (act->is_conflicting_lock(prev))
445                                 return prev;
446                 }
447                 break;
448         }
449         case ATOMIC_UNLOCK: {
450                 /* linear search: from most recent to oldest */
451                 action_list_t *list = obj_map.get(act->get_location());
452                 action_list_t::reverse_iterator rit;
453                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
454                         ModelAction *prev = *rit;
455                         if (!act->same_thread(prev) && prev->is_failed_trylock())
456                                 return prev;
457                 }
458                 break;
459         }
460         case ATOMIC_WAIT: {
461                 /* linear search: from most recent to oldest */
462                 action_list_t *list = obj_map.get(act->get_location());
463                 action_list_t::reverse_iterator rit;
464                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
465                         ModelAction *prev = *rit;
466                         if (!act->same_thread(prev) && prev->is_failed_trylock())
467                                 return prev;
468                         if (!act->same_thread(prev) && prev->is_notify())
469                                 return prev;
470                 }
471                 break;
472         }
473
474         case ATOMIC_NOTIFY_ALL:
475         case ATOMIC_NOTIFY_ONE: {
476                 /* linear search: from most recent to oldest */
477                 action_list_t *list = obj_map.get(act->get_location());
478                 action_list_t::reverse_iterator rit;
479                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
480                         ModelAction *prev = *rit;
481                         if (!act->same_thread(prev) && prev->is_wait())
482                                 return prev;
483                 }
484                 break;
485         }
486         default:
487                 break;
488         }
489         return NULL;
490 }
491
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	/* The most recent action that should be reordered against act */
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was already enabled at the conflict point, only
	 * that one thread needs a backtracking slot; otherwise try them all */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			/* NOTE(review): loop variable 't' shadows the Thread* 't' above */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
575
576 /**
577  * @brief Cache the a backtracking point as the "most recent", if eligible
578  *
579  * Note that this does not prepare the NodeStack for this backtracking
580  * operation, it only caches the action on a per-execution basis
581  *
582  * @param act The operation at which we should explore a different next action
583  * (i.e., backtracking point)
584  * @return True, if this action is now the most recent backtracking point;
585  * false otherwise
586  */
587 bool ModelExecution::set_latest_backtrack(ModelAction *act)
588 {
589         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
590                 priv->next_backtrack = act;
591                 return true;
592         }
593         return false;
594 }
595
/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelExecution::get_next_backtrack()
{
	/* Consume the cached point: subsequent calls return NULL until a new
	 * backtracking point is cached via set_latest_backtrack() */
	ModelAction *next = priv->next_backtrack;
	priv->next_backtrack = NULL;
	return next;
}
607
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loop so a failed recency check can retry with the next read-from
	 * candidate (via increment_read_from + rollback); every other path
	 * returns out of the loop on its first pass */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			/* Read from a store already in the trace */
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				/* Too-many-reads: try the next candidate if one
				 * exists, else record the violation */
				if (node->increment_read_from()) {
					mo_graph->rollbackChanges();
					continue;
				} else {
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			/* Read from a value promised by a future store */
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			/* Create a fresh Promise for the speculated value; it is
			 * owned by (and freed with) this execution */
			struct future_value fv = node->get_future_value();
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Deliver the read value to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
671
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();
	/* NOTE(review): 'state' is dereferenced unconditionally below, so this
	 * assumes every action reaching here carries a mutex -- confirm */

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds iff the lock is currently free */
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Touching the mutex before its initialization is a bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register curr as a waiter on the condition variable */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The node's 'misc' value selects which waiter to wake */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
767
768 /**
769  * @brief Check if the current pending promises allow a future value to be sent
770  *
771  * It is unsafe to pass a future value back if there exists a pending promise Pr
772  * such that:
773  *
774  *    reader --exec-> Pr --exec-> writer
775  *
776  * If such Pr exists, we must save the pending future value until Pr is
777  * resolved.
778  *
779  * @param writer The operation which sends the future value. Must be a write.
780  * @param reader The operation which will observe the value. Must be a read.
781  * @return True if the future value can be sent now; false if it must wait.
782  */
783 bool ModelExecution::promises_may_allow(const ModelAction *writer,
784                 const ModelAction *reader) const
785 {
786         for (int i = promises.size() - 1; i >= 0; i--) {
787                 ModelAction *pr = promises[i]->get_reader(0);
788                 //reader is after promise...doesn't cross any promise
789                 if (*reader > *pr)
790                         return true;
791                 //writer is after promise, reader before...bad...
792                 if (*writer > *pr)
793                         return false;
794         }
795         return true;
796 }
797
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Package the value with an expiration and the (possibly ancestral)
	 * writing thread's id */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* A genuinely new future value creates a backtracking point at the reader */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
828
/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @param work The work queue, for adding fixup work
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* Update mo; w_modification_order also collects reads that could
	 * observe this write as a future value (filled into send_fv) */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		/* This write satisfies an outstanding promise */
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise, work);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the crossing promise is resolved */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values; iterate backwards so erase()
	 * does not disturb indices we have yet to visit */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
881
882 /**
883  * Process a fence ModelAction
884  * @param curr The ModelAction to process
885  * @return True if synchronization was updated
886  */
887 bool ModelExecution::process_fence(ModelAction *curr)
888 {
889         /*
890          * fence-relaxed: no-op
891          * fence-release: only log the occurence (not in this function), for
892          *   use in later synchronization
893          * fence-acquire (this function): search for hypothetical release
894          *   sequences
895          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
896          */
897         bool updated = false;
898         if (curr->is_acquire()) {
899                 action_list_t *list = &action_trace;
900                 action_list_t::reverse_iterator rit;
901                 /* Find X : is_read(X) && X --sb-> curr */
902                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
903                         ModelAction *act = *rit;
904                         if (act == curr)
905                                 continue;
906                         if (act->get_tid() != curr->get_tid())
907                                 continue;
908                         /* Stop at the beginning of the thread */
909                         if (act->is_thread_start())
910                                 break;
911                         /* Stop once we reach a prior fence-acquire */
912                         if (act->is_fence() && act->is_acquire())
913                                 break;
914                         if (!act->is_read())
915                                 continue;
916                         /* read-acquire will find its own release sequences */
917                         if (act->is_acquire())
918                                 continue;
919
920                         /* Establish hypothetical release sequences */
921                         rel_heads_list_t release_heads;
922                         get_release_seq_heads(curr, act, &release_heads);
923                         for (unsigned int i = 0; i < release_heads.size(); i++)
924                                 synchronize(release_heads[i], curr);
925                         if (release_heads.size() != 0)
926                                 updated = true;
927                 }
928         }
929         return updated;
930 }
931
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc.  This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* Entry function and argument are carried in the action's value */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case PTHREAD_CREATE: {
		/* Mirrors THREAD_CREATE, but with the pthread parameter struct */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct pthread_params *params = (struct pthread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		/* Joining synchronizes with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case PTHREAD_JOIN: {
		/* Same handling as THREAD_JOIN, for the pthread interface */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}

	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* Promise bookkeeping for the newly-started thread */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
1023
/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The chosen "break" write, or NULL to force synchronization */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* Take ownership of the most recently pended release sequence */
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	/* The release and the read-from write must come from the same thread */
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;

		/* Propagate the changed clock vector */
		propagate_clockvector(acquire, work_queue);
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1078
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second part of an RMW: fold it into the prior RMWR action
		 * instead of exploring it as a separate step */
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL: the NodeStack already holds this action (replay);
	 * NULL: this action is newly explored */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* One backtracking choice per waiter that could be woken */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1149
1150 /**
1151  * @brief Establish reads-from relation between two actions
1152  *
1153  * Perform basic operations involved with establishing a concrete rf relation,
1154  * including setting the ModelAction data and checking for release sequences.
1155  *
1156  * @param act The action that is reading (must be a read)
1157  * @param rf The action from which we are reading (must be a write)
1158  *
1159  * @return True if this read established synchronization
1160  */
1161
1162 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1163 {
1164         ASSERT(rf);
1165         ASSERT(rf->is_write());
1166
1167         act->set_read_from(rf);
1168         if (act->is_acquire()) {
1169                 rel_heads_list_t release_heads;
1170                 get_release_seq_heads(act, act, &release_heads);
1171                 int num_heads = release_heads.size();
1172                 for (unsigned int i = 0; i < release_heads.size(); i++)
1173                         if (!synchronize(release_heads[i], act))
1174                                 num_heads--;
1175                 return num_heads > 0;
1176         }
1177         return false;
1178 }
1179
1180 /**
1181  * @brief Synchronizes two actions
1182  *
1183  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1184  * This function performs the synchronization as well as providing other hooks
1185  * for other checks along with synchronization.
1186  *
1187  * @param first The left-hand side of the synchronizes-with relation
1188  * @param second The right-hand side of the synchronizes-with relation
1189  * @return True if the synchronization was successful (i.e., was consistent
1190  * with the execution order); false otherwise
1191  */
1192 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1193 {
1194         if (*second < *first) {
1195                 set_bad_synchronization();
1196                 return false;
1197         }
1198         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1199         return second->synchronize_with(first);
1200 }
1201
/**
 * Check promises and eliminate potentially-satisfying threads when a thread is
 * blocked (e.g., join, lock). A thread which is waiting on another thread can
 * no longer satisfy a promise generated from that thread.
 *
 * @param blocker The thread on which a thread is waiting
 * @param waiting The waiting thread
 */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		/* Does any reader of this promise run in the blocker thread? */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}
1230
1231 /**
1232  * @brief Check whether a model action is enabled.
1233  *
1234  * Checks whether an operation would be successful (i.e., is a lock already
1235  * locked, or is the joined thread already complete).
1236  *
1237  * For yield-blocking, yields are never enabled.
1238  *
1239  * @param curr is the ModelAction to check whether it is enabled.
1240  * @return a bool that indicates whether the action is enabled.
1241  */
1242 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1243         if (curr->is_lock()) {
1244                 std::mutex *lock = curr->get_mutex();
1245                 struct std::mutex_state *state = lock->get_state();
1246                 if (state->locked)
1247                         return false;
1248         } else if (curr->is_thread_join()) {
1249                 Thread *blocking = curr->get_thread_operand();
1250                 if (!blocking->is_complete()) {
1251                         thread_blocking_check_promises(blocking, get_thread(curr));
1252                         return false;
1253                 }
1254         } else if (params->yieldblock && curr->is_yield()) {
1255                 return false;
1256         }
1257
1258         return true;
1259 }
1260
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May swap 'curr' for the equivalent action from the NodeStack */
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the queue; processing one entry may enqueue further work */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act, &work_queue))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
					if (act->is_seqcst()) {
						/* An SC read must not read from a write that
						 * happens-before the most recent SC write */
						ModelAction *last_sc_write = get_last_seq_cst_write(act);
						if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
							set_bad_sc_read();
						}
					}
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1376
1377 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1378 {
1379         Node *currnode = curr->get_node();
1380         Node *parnode = currnode->get_parent();
1381
1382         if ((parnode && !parnode->backtrack_empty()) ||
1383                          !currnode->misc_empty() ||
1384                          !currnode->read_from_empty() ||
1385                          !currnode->promise_empty() ||
1386                          !currnode->relseq_break_empty()) {
1387                 set_latest_backtrack(curr);
1388         }
1389 }
1390
1391 bool ModelExecution::promises_expired() const
1392 {
1393         for (unsigned int i = 0; i < promises.size(); i++) {
1394                 Promise *promise = promises[i];
1395                 if (promise->get_expiration() < priv->used_sequence_numbers)
1396                         return true;
1397         }
1398         return false;
1399 }
1400
1401 /**
1402  * This is the strongest feasibility check available.
1403  * @return whether the current trace (partial or complete) must be a prefix of
1404  * a feasible trace.
1405  */
1406 bool ModelExecution::isfeasibleprefix() const
1407 {
1408         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1409 }
1410
1411 /**
1412  * Print disagnostic information about an infeasible execution
1413  * @param prefix A string to prefix the output with; if NULL, then a default
1414  * message prefix will be provided
1415  */
1416 void ModelExecution::print_infeasibility(const char *prefix) const
1417 {
1418         char buf[100];
1419         char *ptr = buf;
1420         if (mo_graph->checkForCycles())
1421                 ptr += sprintf(ptr, "[mo cycle]");
1422         if (priv->failed_promise || priv->hard_failed_promise)
1423                 ptr += sprintf(ptr, "[failed promise]");
1424         if (priv->too_many_reads)
1425                 ptr += sprintf(ptr, "[too many reads]");
1426         if (priv->no_valid_reads)
1427                 ptr += sprintf(ptr, "[no valid reads-from]");
1428         if (priv->bad_synchronization)
1429                 ptr += sprintf(ptr, "[bad sw ordering]");
1430         if (priv->bad_sc_read)
1431                 ptr += sprintf(ptr, "[bad sc read]");
1432         if (promises_expired())
1433                 ptr += sprintf(ptr, "[promise expired]");
1434         if (promises.size() != 0)
1435                 ptr += sprintf(ptr, "[unresolved promise]");
1436         if (ptr != buf)
1437                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1438 }
1439
1440 /**
1441  * Returns whether the current completed trace is feasible, except for pending
1442  * release sequences.
1443  */
1444 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1445 {
1446         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1447
1448 }
1449
1450 /**
1451  * Check if the current partial trace is infeasible. Does not check any
1452  * end-of-execution flags, which might rule out the execution. Thus, this is
1453  * useful only for ruling an execution as infeasible.
1454  * @return whether the current partial trace is infeasible.
1455  */
1456 bool ModelExecution::is_infeasible() const
1457 {
1458         return mo_graph->checkForCycles() ||
1459                 priv->no_valid_reads ||
1460                 priv->too_many_reads ||
1461                 priv->bad_synchronization ||
1462                 priv->bad_sc_read ||
1463                 priv->hard_failed_promise ||
1464                 promises_expired();
1465 }
1466
1467 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1468 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1469         ModelAction *lastread = get_last_action(act->get_tid());
1470         lastread->process_rmw(act);
1471         if (act->is_rmw()) {
1472                 if (lastread->get_reads_from())
1473                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1474                 else
1475                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1476                 mo_graph->commitChanges();
1477         }
1478         return lastread;
1479 }
1480
/**
 * A helper function for ModelExecution::check_recency, to check if the current
 * thread is able to read from a different write/promise for 'params.maxreads'
 * number of steps and if that write/promise should become visible (i.e., is
 * ordered later in the modification order). This helps model memory liveness.
 *
 * @param curr The current action. Must be a read.
 * @param rf The write/promise from which we plan to read
 * @param other_rf The write/promise from which we may read
 * @return True if we were able to read from other_rf for params.maxreads steps
 */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	/* Per-thread action list for the read's location */
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone? */
	/* NOTE(review): no rend() bounds check here — presumably the caller
	 * (check_recency) guarantees at least maxreads prior actions exist;
	 * confirm before calling from elsewhere */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1518
1519 /**
1520  * Checks whether a thread has read from the same write or Promise for too many
1521  * times without seeing the effects of a later write/Promise.
1522  *
1523  * Basic idea:
1524  * 1) there must a different write/promise that we could read from,
1525  * 2) we must have read from the same write/promise in excess of maxreads times,
1526  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1527  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1528  *
1529  * If so, we decide that the execution is no longer feasible.
1530  *
1531  * @param curr The current action. Must be a read.
1532  * @param rf The ModelAction/Promise from which we might read.
1533  * @return True if the read should succeed; false otherwise
1534  */
1535 template <typename T>
1536 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1537 {
1538         if (!params->maxreads)
1539                 return true;
1540
1541         //NOTE: Next check is just optimization, not really necessary....
1542         if (curr->get_node()->get_read_from_past_size() +
1543                         curr->get_node()->get_read_from_promise_size() <= 1)
1544                 return true;
1545
1546         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1547         int tid = id_to_int(curr->get_tid());
1548         ASSERT(tid < (int)thrd_lists->size());
1549         action_list_t *list = &(*thrd_lists)[tid];
1550         action_list_t::reverse_iterator rit = list->rbegin();
1551         ASSERT((*rit) == curr);
1552         /* Skip past curr */
1553         rit++;
1554
1555         action_list_t::reverse_iterator ritcopy = rit;
1556         /* See if we have enough reads from the same value */
1557         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1558                 if (ritcopy == list->rend())
1559                         return true;
1560                 ModelAction *act = *ritcopy;
1561                 if (!act->is_read())
1562                         return true;
1563                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1564                         return true;
1565                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1566                         return true;
1567                 if (act->get_node()->get_read_from_past_size() +
1568                                 act->get_node()->get_read_from_promise_size() <= 1)
1569                         return true;
1570         }
1571         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1572                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1573                 if (should_read_instead(curr, rf, write))
1574                         return false; /* liveness failure */
1575         }
1576         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1577                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1578                 if (should_read_instead(curr, rf, promise))
1579                         return false; /* liveness failure */
1580         }
1581         return true;
1582 }
1583
1584 /**
1585  * @brief Updates the mo_graph with the constraints imposed from the current
1586  * read.
1587  *
1588  * Basic idea is the following: Go through each other thread and find
1589  * the last action that happened before our read.  Two cases:
1590  *
1591  * -# The action is a write: that write must either occur before
1592  * the write we read from or be the write we read from.
1593  * -# The action is a read: the write that that action read from
1594  * must occur before the write we read from or be the same write.
1595  *
1596  * @param curr The current action. Must be a read.
1597  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1598  * @return True if modification order edges were added; false otherwise
1599  */
1600 template <typename rf_type>
1601 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1602 {
1603         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1604         unsigned int i;
1605         bool added = false;
1606         ASSERT(curr->is_read());
1607
1608         /* Last SC fence in the current thread */
1609         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1610         ModelAction *last_sc_write = NULL;
1611         if (curr->is_seqcst())
1612                 last_sc_write = get_last_seq_cst_write(curr);
1613
1614         /* Iterate over all threads */
1615         for (i = 0; i < thrd_lists->size(); i++) {
1616                 /* Last SC fence in thread i */
1617                 ModelAction *last_sc_fence_thread_local = NULL;
1618                 if (int_to_id((int)i) != curr->get_tid())
1619                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1620
1621                 /* Last SC fence in thread i, before last SC fence in current thread */
1622                 ModelAction *last_sc_fence_thread_before = NULL;
1623                 if (last_sc_fence_local)
1624                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1625
1626                 /* Iterate over actions in thread, starting from most recent */
1627                 action_list_t *list = &(*thrd_lists)[i];
1628                 action_list_t::reverse_iterator rit;
1629                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1630                         ModelAction *act = *rit;
1631
1632                         /* Skip curr */
1633                         if (act == curr)
1634                                 continue;
1635                         /* Don't want to add reflexive edges on 'rf' */
1636                         if (act->equals(rf)) {
1637                                 if (act->happens_before(curr))
1638                                         break;
1639                                 else
1640                                         continue;
1641                         }
1642
1643                         if (act->is_write()) {
1644                                 /* C++, Section 29.3 statement 5 */
1645                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1646                                                 *act < *last_sc_fence_thread_local) {
1647                                         added = mo_graph->addEdge(act, rf) || added;
1648                                         break;
1649                                 }
1650                                 /* C++, Section 29.3 statement 4 */
1651                                 else if (act->is_seqcst() && last_sc_fence_local &&
1652                                                 *act < *last_sc_fence_local) {
1653                                         added = mo_graph->addEdge(act, rf) || added;
1654                                         break;
1655                                 }
1656                                 /* C++, Section 29.3 statement 6 */
1657                                 else if (last_sc_fence_thread_before &&
1658                                                 *act < *last_sc_fence_thread_before) {
1659                                         added = mo_graph->addEdge(act, rf) || added;
1660                                         break;
1661                                 }
1662                         }
1663
1664                         /*
1665                          * Include at most one act per-thread that "happens
1666                          * before" curr
1667                          */
1668                         if (act->happens_before(curr)) {
1669                                 if (act->is_write()) {
1670                                         added = mo_graph->addEdge(act, rf) || added;
1671                                 } else {
1672                                         const ModelAction *prevrf = act->get_reads_from();
1673                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1674                                         if (prevrf) {
1675                                                 if (!prevrf->equals(rf))
1676                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1677                                         } else if (!prevrf_promise->equals(rf)) {
1678                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1679                                         }
1680                                 }
1681                                 break;
1682                         }
1683                 }
1684         }
1685
1686         /*
1687          * All compatible, thread-exclusive promises must be ordered after any
1688          * concrete loads from the same thread
1689          */
1690         for (unsigned int i = 0; i < promises.size(); i++)
1691                 if (promises[i]->is_compatible_exclusive(curr))
1692                         added = mo_graph->addEdge(rf, promises[i]) || added;
1693
1694         return added;
1695 }
1696
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @param send_fv A vector for stashing reads to which we may pass our future
 * value. If NULL, then don't record any future values.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * add whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					/* If the previous read has no concrete reads-from
					 * write, order the Promise it read from instead */
					if (act->get_reads_from() == NULL) {
						added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
					} else
						added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
						     !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */

				/* Only stash the future value if it cannot create an
				 * out-of-thin-air cycle or a coherence violation */
				if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1837
//This procedure uses coherence to prune future values that are
//guaranteed to generate a coherence violation.
//
//need to see if there is (1) a promise for thread write, (2)
//the promise is sb before write, (3) the promise can only be
//resolved by the thread read, and (4) the promise has same
//location as read/write

/**
 * @brief Check whether sending 'write' as a future value to 'read' is
 * consistent with the outstanding Promises at the same location.
 *
 * @param write The write whose value may be sent to the future
 * @param read The read that might receive the future value
 * @return false if the future value is guaranteed to produce a coherence
 * violation; true otherwise (conservatively allow)
 */
bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises newest-first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* Only promises at the same memory location matter */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						/* Walk up the exploration tree from 'read' toward
						 * 'prreader'; presumably each parent node is the
						 * previous action in the trace — TODO confirm
						 * termination relies on prreader being an ancestor */
						bool okay=false;
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					/* Coherence violation is guaranteed: prune */
					return false;
				}
			}
		}
	}
	return true;
}
1880
1881
1882 /** Arbitrary reads from the future are not allowed.  Section 29.3
1883  * part 9 places some constraints.  This method checks one result of constraint
1884  * constraint.  Others require compiler support. */
1885 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1886 {
1887         if (!writer->is_rmw())
1888                 return true;
1889
1890         if (!reader->is_rmw())
1891                 return true;
1892
1893         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1894                 if (search == reader)
1895                         return false;
1896                 if (search->get_tid() == reader->get_tid() &&
1897                                 search->happens_before(reader))
1898                         break;
1899         }
1900
1901         return true;
1902 }
1903
1904 /**
1905  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1906  * some constraints. This method checks one the following constraint (others
1907  * require compiler support):
1908  *
1909  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1910  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1911  */
1912 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1913 {
1914         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1915         unsigned int i;
1916         /* Iterate over all threads */
1917         for (i = 0; i < thrd_lists->size(); i++) {
1918                 const ModelAction *write_after_read = NULL;
1919
1920                 /* Iterate over actions in thread, starting from most recent */
1921                 action_list_t *list = &(*thrd_lists)[i];
1922                 action_list_t::reverse_iterator rit;
1923                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1924                         ModelAction *act = *rit;
1925
1926                         /* Don't disallow due to act == reader */
1927                         if (!reader->happens_before(act) || reader == act)
1928                                 break;
1929                         else if (act->is_write())
1930                                 write_after_read = act;
1931                         else if (act->is_read() && act->get_reads_from() != NULL)
1932                                 write_after_read = act->get_reads_from();
1933                 }
1934
1935                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1936                         return false;
1937         }
1938         return true;
1939 }
1940
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelExecution is certain that release_heads is complete;
 * false otherwise
 */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backwards through the RMW reads-from chain, collecting any
	 * release heads we can already establish */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *       both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that could intervene between
	 * 'release' and 'rf' in the modification order */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* A thread that is finished, disabled, or already fully ordered
		 * after rf can produce no surprises */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record what we know so the caller can lazily recheck */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2108
2109 /**
2110  * An interface for getting the release sequence head(s) with which a
2111  * given ModelAction must synchronize. This function only returns a non-empty
2112  * result when it can locate a release sequence head with certainty. Otherwise,
2113  * it may mark the internal state of the ModelExecution so that it will handle
2114  * the release sequence at a later time, causing @a acquire to update its
2115  * synchronization at some later point in execution.
2116  *
2117  * @param acquire The 'acquire' action that may synchronize with a release
2118  * sequence
2119  * @param read The read action that may read from a release sequence; this may
2120  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2121  * when 'acquire' is a fence-acquire)
2122  * @param release_heads A pass-by-reference return parameter. Will be filled
2123  * with the head(s) of the release sequence(s), if they exists with certainty.
2124  * @see ModelExecution::release_seq_heads
2125  */
2126 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2127                 ModelAction *read, rel_heads_list_t *release_heads)
2128 {
2129         const ModelAction *rf = read->get_reads_from();
2130         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2131         sequence->acquire = acquire;
2132         sequence->read = read;
2133
2134         if (!release_seq_heads(rf, release_heads, sequence)) {
2135                 /* add act to 'lazy checking' list */
2136                 pending_rel_seqs.push_back(sequence);
2137         } else {
2138                 snapshot_free(sequence);
2139         }
2140 }
2141
/**
 * @brief Propagate a modified clock vector to actions later in the execution
 * order
 *
 * After an acquire operation lazily completes a release-sequence
 * synchronization, we must update all clock vectors for operations later than
 * the acquire in the execution order.
 *
 * @param acquire The ModelAction whose clock vector must be propagated. It
 * must be present in the action trace; the backwards walk below terminates
 * only upon finding this exact action (no rend() guard).
 * @param work The work queue to which we can add work items, if this
 * propagation triggers more updates (e.g., to the modification order)
 */
void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
{
	/* Re-check all pending release sequences */
	work->push_back(CheckRelSeqWorkEntry(NULL));
	/* Re-check read-acquire for mo_graph edges */
	work->push_back(MOEdgeWorkEntry(acquire));

	/* propagate synchronization to later actions; walk the global trace
	 * backwards from the most recent action down to 'acquire' itself */
	action_list_t::reverse_iterator rit = action_trace.rbegin();
	for (; (*rit) != acquire; rit++) {
		ModelAction *propagate = *rit;
		if (acquire->happens_before(propagate)) {
			/* Merge acquire's updated clock vector into 'propagate' */
			synchronize(acquire, propagate);
			/* Re-check 'propagate' for mo_graph edges */
			work->push_back(MOEdgeWorkEntry(propagate));
		}
	}
}
2172
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-run the release-sequence search with the current mo info */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with any newly-certain heads */
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		/* NOTE(review): 'updated' accumulates across iterations, so later
		 * iterations re-propagate even when they add nothing new; this
		 * appears conservative (extra work) rather than incorrect —
		 * confirm before changing */
		if (updated) {
			/* Propagate the changed clock vector */
			propagate_clockvector(acquire, work_queue);
		}
		if (complete) {
			/* Fully resolved; remove from the pending list and free */
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2227
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * Additionally: the first time an atomic location is touched, an
 * ATOMIC_UNINIT placeholder is inserted ahead of the action in every list;
 * the per-thread "last action" and "last fence-release" caches are updated;
 * and wait operations are also filed under their mutex's location.
 *
 * @param act is the ModelAction to add.
 */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First action on an atomic location: materialize the UNINIT
	 * placeholder and put it in front */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	action_trace.push_back(act);
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-object, per-thread action lists */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Cache the most recent action per thread */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Cache the most recent release fence per thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	if (act->is_wait()) {
		/* The wait's value field holds its mutex's address; index the
		 * action under the mutex location as well */
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2281
2282 /**
2283  * @brief Get the last action performed by a particular Thread
2284  * @param tid The thread ID of the Thread in question
2285  * @return The last action in the thread
2286  */
2287 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2288 {
2289         int threadid = id_to_int(tid);
2290         if (threadid < (int)thrd_last_action.size())
2291                 return thrd_last_action[id_to_int(tid)];
2292         else
2293                 return NULL;
2294 }
2295
2296 /**
2297  * @brief Get the last fence release performed by a particular Thread
2298  * @param tid The thread ID of the Thread in question
2299  * @return The last fence release in the thread, if one exists; NULL otherwise
2300  */
2301 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2302 {
2303         int threadid = id_to_int(tid);
2304         if (threadid < (int)thrd_last_fence_release.size())
2305                 return thrd_last_fence_release[id_to_int(tid)];
2306         else
2307                 return NULL;
2308 }
2309
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action.
 *
 * Precondition: @a curr must already be present in the per-object action
 * list; the first scan below has no rend() guard and relies on finding it.
 *
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last seq_cst write, if one exists; otherwise NULL
 */
ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
{
	void *location = curr->get_location();
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* First, scan backwards to locate curr itself... */
	for (rit = list->rbegin(); (*rit) != curr; rit++)
		;
	rit++; /* Skip past curr */
	/* ...then keep scanning backwards for the nearest seq_cst write */
	for ( ; rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}
2332
2333 /**
2334  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2335  * performed in a particular thread, prior to a particular fence.
2336  * @param tid The ID of the thread to check
2337  * @param before_fence The fence from which to begin the search; if NULL, then
2338  * search for the most recent fence in the thread.
2339  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2340  */
2341 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2342 {
2343         /* All fences should have location FENCE_LOCATION */
2344         action_list_t *list = obj_map.get(FENCE_LOCATION);
2345
2346         if (!list)
2347                 return NULL;
2348
2349         action_list_t::reverse_iterator rit = list->rbegin();
2350
2351         if (before_fence) {
2352                 for (; rit != list->rend(); rit++)
2353                         if (*rit == before_fence)
2354                                 break;
2355
2356                 ASSERT(*rit == before_fence);
2357                 rit++;
2358         }
2359
2360         for (; rit != list->rend(); rit++)
2361                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2362                         return *rit;
2363         return NULL;
2364 }
2365
2366 /**
2367  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2368  * location). This function identifies the mutex according to the current
2369  * action, which is presumed to perform on the same mutex.
2370  * @param curr The current ModelAction; also denotes the object location to
2371  * check
2372  * @return The last unlock operation
2373  */
2374 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2375 {
2376         void *location = curr->get_location();
2377         action_list_t *list = obj_map.get(location);
2378         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2379         action_list_t::reverse_iterator rit;
2380         for (rit = list->rbegin(); rit != list->rend(); rit++)
2381                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2382                         return *rit;
2383         return NULL;
2384 }
2385
2386 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2387 {
2388         ModelAction *parent = get_last_action(tid);
2389         if (!parent)
2390                 parent = get_thread(tid)->get_creation();
2391         return parent;
2392 }
2393
2394 /**
2395  * Returns the clock vector for a given thread.
2396  * @param tid The thread whose clock vector we want
2397  * @return Desired clock vector
2398  */
2399 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2400 {
2401         return get_parent_action(tid)->get_cv();
2402 }
2403
2404 /**
2405  * @brief Find the promise (if any) to resolve for the current action and
2406  * remove it from the pending promise vector
2407  * @param curr The current ModelAction. Should be a write.
2408  * @return The Promise to resolve, if any; otherwise NULL
2409  */
2410 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2411 {
2412         for (unsigned int i = 0; i < promises.size(); i++)
2413                 if (curr->get_node()->get_promise(i)) {
2414                         Promise *ret = promises[i];
2415                         promises.erase(promises.begin() + i);
2416                         return ret;
2417                 }
2418         return NULL;
2419 }
2420
/**
 * Resolve a Promise with a current write.
 * @param write The ModelAction that is fulfilling Promises
 * @param promise The Promise to resolve
 * @param work The work queue, for adding new fixup work
 * @return True if the Promise was successfully resolved; false otherwise
 * (note: as written, this function always returns true)
 */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
		work_queue_t *work)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Point every reader of this promise at the concrete write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		}
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	/* Substituting the write for the promise in mo_graph may fail (cycle) */
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	return true;
}
2466
2467 /**
2468  * Compute the set of promises that could potentially be satisfied by this
2469  * action. Note that the set computation actually appears in the Node, not in
2470  * ModelExecution.
2471  * @param curr The ModelAction that may satisfy promises
2472  */
2473 void ModelExecution::compute_promises(ModelAction *curr)
2474 {
2475         for (unsigned int i = 0; i < promises.size(); i++) {
2476                 Promise *promise = promises[i];
2477                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2478                         continue;
2479
2480                 bool satisfy = true;
2481                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2482                         const ModelAction *act = promise->get_reader(j);
2483                         if (act->happens_before(curr) ||
2484                                         act->could_synchronize_with(curr)) {
2485                                 satisfy = false;
2486                                 break;
2487                         }
2488                 }
2489                 if (satisfy)
2490                         curr->get_node()->set_promise(i);
2491         }
2492 }
2493
/** Checks promises in response to change in ClockVector Threads.
 *
 * When thread @a tid becomes newly synchronized-since a promise's reader
 * (i.e., the reader is now ordered before the thread's current position),
 * that thread is eliminated as a potential satisfier of the promise —
 * presumably because it can no longer produce a write the reader could read
 * from (NOTE(review): confirm against Promise::eliminate_thread semantics).
 *
 * @param tid The thread whose clock vector changed
 * @param old_cv The thread's clock vector before the merge (may be NULL)
 * @param merge_cv The thread's clock vector after the merge
 */
void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Skip promises this thread was already eliminated from */
		if (!promise->thread_is_available(tid))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* Only react to *newly* established synchronization */
			if ((!old_cv || !old_cv->synchronized_since(act)) &&
					merge_cv->synchronized_since(act)) {
				if (promise->eliminate_thread(tid)) {
					/* Promise has failed */
					priv->failed_promise = true;
					return;
				}
			}
		}
	}
}
2514
2515 void ModelExecution::check_promises_thread_disabled()
2516 {
2517         for (unsigned int i = 0; i < promises.size(); i++) {
2518                 Promise *promise = promises[i];
2519                 if (promise->has_failed()) {
2520                         priv->failed_promise = true;
2521                         return;
2522                 }
2523         }
2524 }
2525
/**
 * @brief Checks promises in response to addition to modification order for
 * threads.
 *
 * We test whether threads are still available for satisfying promises after an
 * addition to our modification order constraints. Those that are unavailable
 * are "eliminated". Once all threads are eliminated from satisfying a promise,
 * that promise has failed.
 *
 * @param act The ModelAction which updated the modification order
 * @param is_read_check Should be true if act is a read and we must check for
 * updates to the store from which it read (there is a distinction here for
 * RMW's, which are both a load and a store)
 */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read check, test against the store 'act' read from */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* If some reader of this promise happens-before act, check the
		 * promise against the write once (break after first such reader) */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
				continue;
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* If the promise is mo-ordered before this write, re-check it */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
		}
	}
}
2574
2575 /**
2576  * Compute the set of writes that may break the current pending release
2577  * sequence. This information is extracted from previou release sequence
2578  * calculations.
2579  *
2580  * @param curr The current ModelAction. Must be a release sequence fixup
2581  * action.
2582  */
2583 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2584 {
2585         if (pending_rel_seqs.empty())
2586                 return;
2587
2588         struct release_seq *pending = pending_rel_seqs.back();
2589         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2590                 const ModelAction *write = pending->writes[i];
2591                 curr->get_node()->add_relseq_break(write);
2592         }
2593
2594         /* NULL means don't break the sequence; just synchronize */
2595         curr->get_node()->add_relseq_break(NULL);
2596 }
2597
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * Candidate writes are only added when tentatively applying their mo
 * constraints does not render the execution infeasible; the constraints are
 * rolled back after each test.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* A seq_cst read is constrained by the most recent seq_cst write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: apply the mo constraints
				 * tentatively, test, then roll them back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2679
2680 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2681 {
2682         for ( ; write != NULL; write = write->get_reads_from()) {
2683                 /* UNINIT actions don't have a Node, and they never sleep */
2684                 if (write->is_uninitialized())
2685                         return true;
2686                 Node *prevnode = write->get_node()->get_parent();
2687
2688                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2689                 if (write->is_release() && thread_sleep)
2690                         return true;
2691                 if (!write->is_rmw())
2692                         return false;
2693         }
2694         return true;
2695 }
2696
2697 /**
2698  * @brief Get an action representing an uninitialized atomic
2699  *
2700  * This function may create a new one or try to retrieve one from the NodeStack
2701  *
2702  * @param curr The current action, which prompts the creation of an UNINIT action
2703  * @return A pointer to the UNINIT ModelAction
2704  */
2705 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2706 {
2707         Node *node = curr->get_node();
2708         ModelAction *act = node->get_uninit_action();
2709         if (!act) {
2710                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2711                 node->set_uninit_action(act);
2712         }
2713         act->create_cv(NULL);
2714         return act;
2715 }
2716
2717 static void print_list(const action_list_t *list)
2718 {
2719         action_list_t::const_iterator it;
2720
2721         model_print("------------------------------------------------------------------------------------\n");
2722         model_print("#    t     Action type     MO       Location         Value               Rf  CV\n");
2723         model_print("------------------------------------------------------------------------------------\n");
2724
2725         unsigned int hash = 0;
2726
2727         for (it = list->begin(); it != list->end(); it++) {
2728                 const ModelAction *act = *it;
2729                 if (act->get_seq_number() > 0)
2730                         act->print();
2731                 hash = hash^(hash<<3)^((*it)->hash());
2732         }
2733         model_print("HASH %u\n", hash);
2734         model_print("------------------------------------------------------------------------------------\n");
2735 }
2736
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph, plus reads-from ("rf") and
 * sequenced-before ("sb") edges, as a Graphviz "dot" file
 * @param filename Base name for the output file; ".dot" is appended
 */
void ModelExecution::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf: never overflow the fixed-size name buffer */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return; /* Can't write the dump; fail quietly */
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);

	/* Most recent action seen per thread, for drawing sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		/* Use id_to_int() consistently for all thread_array indexing */
		int tid = id_to_int(act->get_tid());
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* rf edge comes from either a concrete write or a promise */
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
			else
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						act,
						"label=\"rf\", color=red");
		}
		if (thread_array[tid]) {
			/* sb edge from the previous action in the same thread */
			mo_graph->dot_print_edge(file,
					thread_array[tid],
					act,
					"label=\"sb\", color=blue, weight=400");
		}

		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2776
/** @brief Prints an execution trace summary: feasibility annotations, the
 *  full action table, and any still-pending promises. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Also dump the mo-graph for this execution to per-execution files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Annotate notable (but feasible) termination conditions */
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	print_list(&action_trace);
	model_print("\n");

	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2812
2813 /**
2814  * Add a Thread to the system for the first time. Should only be called once
2815  * per thread.
2816  * @param t The Thread to add
2817  */
2818 void ModelExecution::add_thread(Thread *t)
2819 {
2820         unsigned int i = id_to_int(t->get_id());
2821         if (i >= thread_map.size())
2822                 thread_map.resize(i + 1);
2823         thread_map[i] = t;
2824         if (!t->is_model_thread())
2825                 scheduler->add_thread(t);
2826 }
2827
2828 /**
2829  * @brief Get a Thread reference by its ID
2830  * @param tid The Thread's ID
2831  * @return A Thread reference
2832  */
2833 Thread * ModelExecution::get_thread(thread_id_t tid) const
2834 {
2835         unsigned int i = id_to_int(tid);
2836         if (i < thread_map.size())
2837                 return thread_map[i];
2838         return NULL;
2839 }
2840
2841 /**
2842  * @brief Get a reference to the Thread in which a ModelAction was executed
2843  * @param act The ModelAction
2844  * @return A Thread reference
2845  */
2846 Thread * ModelExecution::get_thread(const ModelAction *act) const
2847 {
2848         return get_thread(act->get_tid());
2849 }
2850
2851 /**
2852  * @brief Get a Promise's "promise number"
2853  *
2854  * A "promise number" is an index number that is unique to a promise, valid
2855  * only for a specific snapshot of an execution trace. Promises may come and go
2856  * as they are generated an resolved, so an index only retains meaning for the
2857  * current snapshot.
2858  *
2859  * @param promise The Promise to check
2860  * @return The promise index, if the promise still is valid; otherwise -1
2861  */
2862 int ModelExecution::get_promise_number(const Promise *promise) const
2863 {
2864         for (unsigned int i = 0; i < promises.size(); i++)
2865                 if (promises[i] == promise)
2866                         return i;
2867         /* Not found */
2868         return -1;
2869 }
2870
2871 /**
2872  * @brief Check if a Thread is currently enabled
2873  * @param t The Thread to check
2874  * @return True if the Thread is currently enabled
2875  */
2876 bool ModelExecution::is_enabled(Thread *t) const
2877 {
2878         return scheduler->is_enabled(t);
2879 }
2880
2881 /**
2882  * @brief Check if a Thread is currently enabled
2883  * @param tid The ID of the Thread to check
2884  * @return True if the Thread is currently enabled
2885  */
2886 bool ModelExecution::is_enabled(thread_id_t tid) const
2887 {
2888         return scheduler->is_enabled(tid);
2889 }
2890
2891 /**
2892  * @brief Select the next thread to execute based on the current action
2893  *
2894  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2895  * actions should be followed by the execution of their child thread. In either
2896  * case, the current action should determine the next thread schedule.
2897  *
2898  * @param curr The current action
2899  * @return The next thread to run, if the current action will determine this
2900  * selection; otherwise NULL
2901  */
2902 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2903 {
2904         /* Do not split atomic RMW */
2905         if (curr->is_rmwr())
2906                 return get_thread(curr);
2907         if (curr->is_write()) {
2908 //              std::memory_order order = curr->get_mo(); 
2909 //              switch(order) {
2910 //                      case std::memory_order_relaxed: 
2911 //                              return get_thread(curr);
2912 //                      case std::memory_order_release:
2913 //                              return get_thread(curr);
2914 //                      defalut:
2915 //                              return NULL;
2916 //              }       
2917                 return NULL;
2918         }
2919
2920         /* Follow CREATE with the created thread */
2921         /* which is not needed, because model.cc takes care of this */
2922         if (curr->get_type() == THREAD_CREATE)
2923                 return curr->get_thread_operand(); 
2924         if (curr->get_type() == PTHREAD_CREATE) {
2925                 return curr->get_thread_operand();
2926         }
2927         return NULL;
2928 }
2929
2930 /** @return True if the execution has taken too many steps */
2931 bool ModelExecution::too_many_steps() const
2932 {
2933         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2934 }
2935
2936 /**
2937  * Takes the next step in the execution, if possible.
2938  * @param curr The current step to take
2939  * @return Returns the next Thread to run, if any; NULL if this execution
2940  * should terminate
2941  */
2942 Thread * ModelExecution::take_step(ModelAction *curr)
2943 {
2944         Thread *curr_thrd = get_thread(curr);
2945         ASSERT(curr_thrd->get_state() == THREAD_READY);
2946
2947         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2948         curr = check_current_action(curr);
2949         ASSERT(curr);
2950
2951         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2952                 scheduler->remove_thread(curr_thrd);
2953
2954         return action_select_next_thread(curr);
2955 }
2956
2957 /**
2958  * Launch end-of-execution release sequence fixups only when
2959  * the execution is otherwise feasible AND there are:
2960  *
2961  * (1) pending release sequences
2962  * (2) pending assertions that could be invalidated by a change
2963  * in clock vectors (i.e., data races)
2964  * (3) no pending promises
2965  */
2966 void ModelExecution::fixup_release_sequences()
2967 {
2968         while (!pending_rel_seqs.empty() &&
2969                         is_feasible_prefix_ignore_relseq() &&
2970                         haveUnrealizedRaces()) {
2971                 model_print("*** WARNING: release sequence fixup action "
2972                                 "(%zu pending release seuqence(s)) ***\n",
2973                                 pending_rel_seqs.size());
2974                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2975                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2976                                 model_thread);
2977                 take_step(fixup);
2978         };
2979 }