move pthread_map and mutex_map inside of execution.h
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "bugmessage.h"
18
19 #define INITIAL_THREAD_ID       0
20
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* bug_message objects are heap-allocated (see assert_bug); free them */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to assign to the next created thread */
	unsigned int next_thread_id;
	/** @brief Latest sequence number handed out to a ModelAction */
	modelclock_t used_sequence_numbers;
	/** @brief Cached most-recent backtracking point for this execution */
	ModelAction *next_backtrack;
	/** @brief Bug reports accumulated during this execution (owned) */
	SnapVector<bug_message *> bugs;
	/** @brief A promise was violated in this execution */
	bool failed_promise;
	bool hard_failed_promise;
	/** @brief A read exceeded the read-from recency bound */
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief A bad seq-cst read was observed */
	bool bad_sc_read;
	/** @brief A trace assertion was triggered; execution should halt */
	bool asserted;

	SNAPSHOTALLOC
};
61
/** @brief Constructor
 *  @param m The ModelChecker that owns this execution
 *  @param params Model-checking parameters (not owned)
 *  @param scheduler The thread scheduler to drive (shared with the model)
 *  @param node_stack The NodeStack used for backtracking (shared)
 *
 *  NOTE: the initializer list order must continue to match the member
 *  declaration order in execution.h. */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	pthread_map(0),
	pthread_counter(0),
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	mutex_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());	// L: Create model thread
	add_thread(model_thread);			// L: Add model thread to scheduler
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
93
94 /** @brief Destructor */
95 ModelExecution::~ModelExecution()
96 {
97         for (unsigned int i = 0; i < get_num_threads(); i++)
98                 delete get_thread(int_to_id(i));
99
100         for (unsigned int i = 0; i < promises.size(); i++)
101                 delete promises[i];
102
103         delete mo_graph;
104         delete priv;
105 }
106
107 int ModelExecution::get_execution_number() const
108 {
109         return model->get_execution_number();
110 }
111
112 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
113 {
114         action_list_t *tmp = hash->get(ptr);
115         if (tmp == NULL) {
116                 tmp = new action_list_t();
117                 hash->put(ptr, tmp);
118         }
119         return tmp;
120 }
121
122 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
123 {
124         SnapVector<action_list_t> *tmp = hash->get(ptr);
125         if (tmp == NULL) {
126                 tmp = new SnapVector<action_list_t>();
127                 hash->put(ptr, tmp);
128         }
129         return tmp;
130 }
131
132 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
133 {
134         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
135         if (wrv==NULL)
136                 return NULL;
137         unsigned int thread=id_to_int(tid);
138         if (thread < wrv->size())
139                 return &(*wrv)[thread];
140         else
141                 return NULL;
142 }
143
144 /** @return a thread ID for a new Thread */
145 thread_id_t ModelExecution::get_next_id()
146 {
147         return priv->next_thread_id++;
148 }
149
150 /** @return the number of user threads created during this execution */
151 unsigned int ModelExecution::get_num_threads() const
152 {
153         return priv->next_thread_id;
154 }
155
156 /** @return a sequence number for a new ModelAction */
157 modelclock_t ModelExecution::get_next_seq_num()
158 {
159         return ++priv->used_sequence_numbers;
160 }
161
162 /**
163  * @brief Should the current action wake up a given thread?
164  *
165  * @param curr The current action
166  * @param thread The thread that we might wake up
167  * @return True, if we should wake up the sleeping thread; false otherwise
168  */
169 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
170 {
171         const ModelAction *asleep = thread->get_pending();
172         /* Don't allow partial RMW to wake anyone up */
173         if (curr->is_rmwr())
174                 return false;
175         /* Synchronizing actions may have been backtracked */
176         if (asleep->could_synchronize_with(curr))
177                 return true;
178         /* All acquire/release fences and fence-acquire/store-release */
179         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
180                 return true;
181         /* Fence-release + store can awake load-acquire on the same location */
182         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
183                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
184                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
185                         return true;
186         }
187         return false;
188 }
189
190 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
191 {
192         for (unsigned int i = 0; i < get_num_threads(); i++) {
193                 Thread *thr = get_thread(int_to_id(i));
194                 if (scheduler->is_sleep_set(thr)) {
195                         if (should_wake_up(curr, thr))
196                                 /* Remove this thread from sleep set */
197                                 scheduler->remove_sleep(thr);
198                 }
199         }
200 }
201
202 /** @brief Alert the model-checker that an incorrectly-ordered
203  * synchronization was made */
204 void ModelExecution::set_bad_synchronization()
205 {
206         priv->bad_synchronization = true;
207 }
208
/** @brief Alert the model-checker that a bad seq-cst read was observed
 *  (comment previously copy-pasted from set_bad_synchronization) */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
215
216 bool ModelExecution::assert_bug(const char *msg)
217 {
218         priv->bugs.push_back(new bug_message(msg));
219
220         if (isfeasibleprefix()) {
221                 set_assert();
222                 return true;
223         }
224         return false;
225 }
226
227 /** @return True, if any bugs have been reported for this execution */
228 bool ModelExecution::have_bug_reports() const
229 {
230         return priv->bugs.size() != 0;
231 }
232
233 SnapVector<bug_message *> * ModelExecution::get_bugs() const
234 {
235         return &priv->bugs;
236 }
237
238 /**
239  * Check whether the current trace has triggered an assertion which should halt
240  * its execution.
241  *
242  * @return True, if the execution should be aborted; false otherwise
243  */
244 bool ModelExecution::has_asserted() const
245 {
246         return priv->asserted;
247 }
248
249 /**
250  * Trigger a trace assertion which should cause this execution to be halted.
251  * This can be due to a detected bug or due to an infeasibility that should
252  * halt ASAP.
253  */
254 void ModelExecution::set_assert()
255 {
256         priv->asserted = true;
257 }
258
259 /**
260  * Check if we are in a deadlock. Should only be called at the end of an
261  * execution, although it should not give false positives in the middle of an
262  * execution (there should be some ENABLED thread).
263  *
264  * @return True if program is in a deadlock; false otherwise
265  */
266 bool ModelExecution::is_deadlocked() const
267 {
268         bool blocking_threads = false;
269         for (unsigned int i = 0; i < get_num_threads(); i++) {
270                 thread_id_t tid = int_to_id(i);
271                 if (is_enabled(tid))
272                         return false;
273                 Thread *t = get_thread(tid);
274                 if (!t->is_model_thread() && t->get_pending())
275                         blocking_threads = true;
276         }
277         return blocking_threads;
278 }
279
280 /**
281  * @brief Check if we are yield-blocked
282  *
283  * A program can be "yield-blocked" if all threads are ready to execute a
284  * yield.
285  *
286  * @return True if the program is yield-blocked; false otherwise
287  */
288 bool ModelExecution::is_yieldblocked() const
289 {
290         if (!params->yieldblock)
291                 return false;
292
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *t = get_thread(tid);
296                 if (t->get_pending() && t->get_pending()->is_yield())
297                         return true;
298         }
299         return false;
300 }
301
302 /**
303  * Check if this is a complete execution. That is, have all thread completed
304  * execution (rather than exiting because sleep sets have forced a redundant
305  * execution).
306  *
307  * @return True if the execution is complete.
308  */
309 bool ModelExecution::is_complete_execution() const
310 {
311         if (is_yieldblocked())
312                 return false;
313         for (unsigned int i = 0; i < get_num_threads(); i++)
314                 if (is_enabled(int_to_id(i)))
315                         return false;
316         return true;
317 }
318
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: scan the full trace backwards until we hit
	 * the release action itself, leaving rit positioned at it */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* One slot per thread, indexed by integer thread ID */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions can't be fence conflicts for act */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				/* Relaxed load of act's location; candidate
				 * to pair with a later-found fence-acquire */
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent fence-acquire per thread
		 * (first one seen while walking backwards) */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* A thread conflicts if it has both a fence-acquire and a prior load
	 * of act's location; return the latest such fence (by seq number) */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
391
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* Intentional fall-through: a seq-cst fence conflicts like an
		 * atomic access */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return whichever
		 * conflict is more recent */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
491
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param the ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	/* No conflicting action -> nothing to backtrack against */
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point, only that
	 * thread need be considered; otherwise try every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				/* Another enabled thread has priority: skip tid */
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
575
576 /**
577  * @brief Cache the a backtracking point as the "most recent", if eligible
578  *
579  * Note that this does not prepare the NodeStack for this backtracking
580  * operation, it only caches the action on a per-execution basis
581  *
582  * @param act The operation at which we should explore a different next action
583  * (i.e., backtracking point)
584  * @return True, if this action is now the most recent backtracking point;
585  * false otherwise
586  */
587 bool ModelExecution::set_latest_backtrack(ModelAction *act)
588 {
589         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
590                 priv->next_backtrack = act;
591                 return true;
592         }
593         return false;
594 }
595
596 /**
597  * Returns last backtracking point. The model checker will explore a different
598  * path for this point in the next execution.
599  * @return The ModelAction at which the next execution should diverge.
600  */
601 ModelAction * ModelExecution::get_next_backtrack()
602 {
603         ModelAction *next = priv->next_backtrack;
604         priv->next_backtrack = NULL;
605         return next;
606 }
607
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 *
 * The loop retries READ_FROM_PAST choices: a recency-violating rf is
 * rolled back (mo_graph changes discarded) and the node's next read-from
 * candidate is tried; all other paths exit after one iteration.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			/* Read from a store already in the trace */
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			/* Stage mo_graph changes so they can be rolled back */
			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Try the next read-from candidate */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left; flag the violation */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			/* Read from a value promised by a future write */
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a new Promise obligating some later write */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Propagate the value read to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
671
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	/* NOTE(review): state stays NULL when curr carries no mutex; the
	 * cases below dereference it — presumably lock/unlock actions always
	 * have one. TODO confirm against callers. */
	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds iff the mutex is currently free */
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* A lock observed before the mutex's initialization is a bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter on this location */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The node's misc counter selects which waiter to wake, so every
		 * choice is explored across executions */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
767
768 /**
769  * @brief Check if the current pending promises allow a future value to be sent
770  *
771  * It is unsafe to pass a future value back if there exists a pending promise Pr
772  * such that:
773  *
774  *    reader --exec-> Pr --exec-> writer
775  *
776  * If such Pr exists, we must save the pending future value until Pr is
777  * resolved.
778  *
779  * @param writer The operation which sends the future value. Must be a write.
780  * @param reader The operation which will observe the value. Must be a read.
781  * @return True if the future value can be sent now; false if it must wait.
782  */
783 bool ModelExecution::promises_may_allow(const ModelAction *writer,
784                 const ModelAction *reader) const
785 {
786         for (int i = promises.size() - 1; i >= 0; i--) {
787                 ModelAction *pr = promises[i]->get_reader(0);
788                 //reader is after promise...doesn't cross any promise
789                 if (*reader > *pr)
790                         return true;
791                 //writer is after promise, reader before...bad...
792                 if (*writer > *pr)
793                         return false;
794         }
795         return true;
796 }
797
798 /**
799  * @brief Add a future value to a reader
800  *
801  * This function performs a few additional checks to ensure that the future
802  * value can be feasibly observed by the reader
803  *
804  * @param writer The operation whose value is sent. Must be a write.
805  * @param reader The read operation which may read the future value. Must be a read.
806  */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Package the value, its expiration sequence number, and the
	 * (possibly substituted ancestor) sending thread */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* Backtrack to the reader only if this value is newly recorded */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
828
829 /**
830  * Process a write ModelAction
831  * @param curr The ModelAction to process
832  * @param work The work queue, for adding fixup work
833  * @return True if the mo_graph was updated or promises were resolved
834  */
bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* Update the modification order; candidate future-value readers are
	 * collected into send_fv as a side effect */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	/* This write may resolve at most one pending promise */
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise, work);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the intervening promise resolves */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values */
	/* Iterate backwards so erase() does not disturb unvisited indices */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	/* New mo edges may affect which threads can satisfy promises */
	mo_check_promises(curr, false);

	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
881
882 /**
883  * Process a fence ModelAction
884  * @param curr The ModelAction to process
885  * @return True if synchronization was updated
886  */
bool ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurrence (not in this function), for
	 *   use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 *   sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = &action_trace;
		action_list_t::reverse_iterator rit;
		/* Find X : is_read(X) && X --sb-> curr */
		/* Walk the full trace backwards, considering only actions in
		 * curr's own thread (i.e., sequenced before curr) */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr)
				continue;
			if (act->get_tid() != curr->get_tid())
				continue;
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
				break;
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
				break;
			if (!act->is_read())
				continue;
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
				continue;

			/* Establish hypothetical release sequences */
			rel_heads_list_t release_heads;
			get_release_seq_heads(curr, act, &release_heads);
			for (unsigned int i = 0; i < release_heads.size(); i++)
				synchronize(release_heads[i], curr);
			if (release_heads.size() != 0)
				updated = true;
		}
	}
	return updated;
}
931
932 /**
933  * @brief Process the current action for thread-related activity
934  *
 * Performs current-action processing for a THREAD_* ModelAction. Processes
936  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
937  * synchronization, etc.  This function is a no-op for non-THREAD actions
938  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
939  *
940  * @param curr The current action
941  * @return True if synchronization was updated or a thread completed
942  */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* The new thread's entry function and argument were packed
		 * into the action's value by the caller */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case PTHREAD_CREATE: {
		/* Hand out pthread ids sequentially; the id is written back
		 * through the user's pthread_t pointer */
		(*(pthread_t *)curr->get_location()) = pthread_counter++;

		struct pthread_params *params = (struct pthread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		add_thread(th);
		th->set_creation(curr);

		/* Record the pthread-id -> Thread mapping for later joins */
		if ( pthread_map.size() < pthread_counter )
			pthread_map.resize( pthread_counter );
		pthread_map[ pthread_counter-1 ] = th;

		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}

		break;
	}
	case THREAD_JOIN: {
		/* Join synchronizes with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case PTHREAD_JOIN: {
		/* Same handling as THREAD_JOIN */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break; // TODO(WL): pthread_join handling may need further work
	}

	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* A newly-started thread re-checks promise satisfiability
		 * against its initial clock vector */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
1030
1031 /**
1032  * @brief Process the current action for release sequence fixup activity
1033  *
1034  * Performs model-checker release sequence fixups for the current action,
1035  * forcing a single pending release sequence to break (with a given, potential
1036  * "loose" write) or to complete (i.e., synchronize). If a pending release
1037  * sequence forms a complete release sequence, then we must perform the fixup
1038  * synchronization, mo_graph additions, etc.
1039  *
1040  * @param curr The current action; must be a release sequence fixup action
1041  * @param work_queue The work queue to which to add work items as they are
1042  * generated
1043  */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The chosen "break" write, or NULL to force synchronization */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;

		/* Propagate the changed clock vector */
		propagate_clockvector(acquire, work_queue);
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1085
1086 /**
1087  * Initialize the current action by performing one or more of the following
1088  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1089  * in the NodeStack, manipulating backtracking sets, allocating and
1090  * initializing clock vectors, and computing the promises to fulfill.
1091  *
1092  * @param curr The current action, as passed from the user context; may be
1093  * freed/invalidated after the execution of this function, with a different
1094  * action "returned" its place (pass-by-reference)
1095  * @return True if curr is a newly-explored action; false otherwise
1096  */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second part of a RMW: merge into the pending RMWR rather
		 * than exploring a new action */
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL means this action was explored in a prior execution */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			/* wait() has two outcomes to explore (see set_misc_max) */
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* notify_one() may wake any one of the current waiters */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1156
1157 /**
1158  * @brief Establish reads-from relation between two actions
1159  *
1160  * Perform basic operations involved with establishing a concrete rf relation,
1161  * including setting the ModelAction data and checking for release sequences.
1162  *
1163  * @param act The action that is reading (must be a read)
1164  * @param rf The action from which we are reading (must be a write)
1165  *
1166  * @return True if this read established synchronization
1167  */
1168
1169 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1170 {
1171         ASSERT(rf);
1172         ASSERT(rf->is_write());
1173
1174         act->set_read_from(rf);
1175         if (act->is_acquire()) {
1176                 rel_heads_list_t release_heads;
1177                 get_release_seq_heads(act, act, &release_heads);
1178                 int num_heads = release_heads.size();
1179                 for (unsigned int i = 0; i < release_heads.size(); i++)
1180                         if (!synchronize(release_heads[i], act))
1181                                 num_heads--;
1182                 return num_heads > 0;
1183         }
1184         return false;
1185 }
1186
1187 /**
1188  * @brief Synchronizes two actions
1189  *
1190  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1191  * This function performs the synchronization as well as providing other hooks
1192  * for other checks along with synchronization.
1193  *
1194  * @param first The left-hand side of the synchronizes-with relation
1195  * @param second The right-hand side of the synchronizes-with relation
1196  * @return True if the synchronization was successful (i.e., was consistent
1197  * with the execution order); false otherwise
1198  */
1199 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1200 {
1201         if (*second < *first) {
1202                 set_bad_synchronization();
1203                 return false;
1204         }
1205         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1206         return second->synchronize_with(first);
1207 }
1208
1209 /**
1210  * Check promises and eliminate potentially-satisfying threads when a thread is
1211  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1212  * no longer satisfy a promise generated from that thread.
1213  *
1214  * @param blocker The thread on which a thread is waiting
1215  * @param waiting The waiting thread
1216  */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			/* Only promises read in the blocking thread can no
			 * longer be satisfied by 'waiting' */
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}
1237
1238 /**
1239  * @brief Check whether a model action is enabled.
1240  *
1241  * Checks whether an operation would be successful (i.e., is a lock already
1242  * locked, or is the joined thread already complete).
1243  *
1244  * For yield-blocking, yields are never enabled.
1245  *
1246  * @param curr is the ModelAction to check whether it is enabled.
1247  * @return a bool that indicates whether the action is enabled.
1248  */
1249 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1250         if (curr->is_lock()) {
1251                 std::mutex *lock = curr->get_mutex();
1252                 struct std::mutex_state *state = lock->get_state();
1253                 if (state->locked)
1254                         return false;
1255         } else if (curr->is_thread_join()) {
1256                 Thread *blocking = curr->get_thread_operand();
1257                 if (!blocking->is_complete()) {
1258                         thread_blocking_check_promises(blocking, get_thread(curr));
1259                         return false;
1260                 }
1261         } else if (params->yieldblock && curr->is_yield()) {
1262                 return false;
1263         }
1264
1265         return true;
1266 }
1267
1268 /**
1269  * This is the heart of the model checker routine. It performs model-checking
1270  * actions corresponding to a given "current action." Among other processes, it
1271  * calculates reads-from relationships, updates synchronization clock vectors,
1272  * forms a memory_order constraints graph, and handles replay/backtrack
1273  * execution when running permutations of previously-observed executions.
1274  *
1275  * @param curr The current action to process
1276  * @return The ModelAction that is actually executed; may be different than
1277  * curr
1278  */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the queue; processing one item may enqueue follow-up work */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act, &work_queue))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
					if (act->is_seqcst()) {
						/* A seq-cst read must not read from a write
						 * that happens-before the last SC write */
						ModelAction *last_sc_write = get_last_seq_cst_write(act);
						if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
							set_bad_sc_read();
						}
					}
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1383
1384 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1385 {
1386         Node *currnode = curr->get_node();
1387         Node *parnode = currnode->get_parent();
1388
1389         if ((parnode && !parnode->backtrack_empty()) ||
1390                          !currnode->misc_empty() ||
1391                          !currnode->read_from_empty() ||
1392                          !currnode->promise_empty() ||
1393                          !currnode->relseq_break_empty()) {
1394                 set_latest_backtrack(curr);
1395         }
1396 }
1397
1398 bool ModelExecution::promises_expired() const
1399 {
1400         for (unsigned int i = 0; i < promises.size(); i++) {
1401                 Promise *promise = promises[i];
1402                 if (promise->get_expiration() < priv->used_sequence_numbers)
1403                         return true;
1404         }
1405         return false;
1406 }
1407
1408 /**
1409  * This is the strongest feasibility check available.
1410  * @return whether the current trace (partial or complete) must be a prefix of
1411  * a feasible trace.
1412  */
1413 bool ModelExecution::isfeasibleprefix() const
1414 {
1415         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1416 }
1417
1418 /**
 * Print diagnostic information about an infeasible execution
1420  * @param prefix A string to prefix the output with; if NULL, then a default
1421  * message prefix will be provided
1422  */
1423 void ModelExecution::print_infeasibility(const char *prefix) const
1424 {
1425         char buf[100];
1426         char *ptr = buf;
1427         if (mo_graph->checkForCycles())
1428                 ptr += sprintf(ptr, "[mo cycle]");
1429         if (priv->failed_promise || priv->hard_failed_promise)
1430                 ptr += sprintf(ptr, "[failed promise]");
1431         if (priv->too_many_reads)
1432                 ptr += sprintf(ptr, "[too many reads]");
1433         if (priv->no_valid_reads)
1434                 ptr += sprintf(ptr, "[no valid reads-from]");
1435         if (priv->bad_synchronization)
1436                 ptr += sprintf(ptr, "[bad sw ordering]");
1437         if (priv->bad_sc_read)
1438                 ptr += sprintf(ptr, "[bad sc read]");
1439         if (promises_expired())
1440                 ptr += sprintf(ptr, "[promise expired]");
1441         if (promises.size() != 0)
1442                 ptr += sprintf(ptr, "[unresolved promise]");
1443         if (ptr != buf)
1444                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1445 }
1446
1447 /**
1448  * Returns whether the current completed trace is feasible, except for pending
1449  * release sequences.
1450  */
1451 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1452 {
1453         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1454
1455 }
1456
1457 /**
1458  * Check if the current partial trace is infeasible. Does not check any
1459  * end-of-execution flags, which might rule out the execution. Thus, this is
1460  * useful only for ruling an execution as infeasible.
1461  * @return whether the current partial trace is infeasible.
1462  */
1463 bool ModelExecution::is_infeasible() const
1464 {
1465         return mo_graph->checkForCycles() ||
1466                 priv->no_valid_reads ||
1467                 priv->too_many_reads ||
1468                 priv->bad_synchronization ||
1469                 priv->bad_sc_read ||
1470                 priv->hard_failed_promise ||
1471                 promises_expired();
1472 }
1473
1474 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1475 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1476         ModelAction *lastread = get_last_action(act->get_tid());
1477         lastread->process_rmw(act);
1478         if (act->is_rmw()) {
1479                 if (lastread->get_reads_from())
1480                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1481                 else
1482                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1483                 mo_graph->commitChanges();
1484         }
1485         return lastread;
1486 }
1487
1488 /**
1489  * A helper function for ModelExecution::check_recency, to check if the current
1490  * thread is able to read from a different write/promise for 'params.maxreads'
1491  * number of steps and if that write/promise should become visible (i.e., is
1492  * ordered later in the modification order). This helps model memory liveness.
1493  *
1494  * @param curr The current action. Must be a read.
1495  * @param rf The write/promise from which we plan to read
1496  * @param other_rf The write/promise from which we may read
1497  * @return True if we were able to read from other_rf for params.maxreads steps
1498  */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone? */
	/* NOTE(review): this loop advances rit without a rend() check, so it
	 * assumes the per-thread list holds at least params->maxreads prior
	 * actions; the caller (check_recency) appears to ensure this —
	 * confirm before relying on it */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1525
1526 /**
1527  * Checks whether a thread has read from the same write or Promise for too many
1528  * times without seeing the effects of a later write/Promise.
1529  *
1530  * Basic idea:
 * 1) there must be a different write/promise that we could read from,
1532  * 2) we must have read from the same write/promise in excess of maxreads times,
1533  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1534  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1535  *
1536  * If so, we decide that the execution is no longer feasible.
1537  *
1538  * @param curr The current action. Must be a read.
1539  * @param rf The ModelAction/Promise from which we might read.
1540  * @return True if the read should succeed; false otherwise
1541  */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* maxreads == 0 disables the liveness check entirely */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	/* curr must be the most recent action in its own thread's list */
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; any early return
	 * here means condition (2) of the recency check is not met, so the
	 * read is allowed */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		/* Each prior read must have read from the same write/promise 'rf' */
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		/* A read with no alternative sources can't indicate starvation */
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* Conditions (1), (3), (4): is there some other write or promise we
	 * could have read from for all maxreads steps instead? */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1590
1591 /**
1592  * @brief Updates the mo_graph with the constraints imposed from the current
1593  * read.
1594  *
1595  * Basic idea is the following: Go through each other thread and find
1596  * the last action that happened before our read.  Two cases:
1597  *
1598  * -# The action is a write: that write must either occur before
1599  * the write we read from or be the write we read from.
1600  * -# The action is a read: the write that that action read from
1601  * must occur before the write we read from or be the same write.
1602  *
1603  * @param curr The current action. Must be a read.
1604  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1605  * @return True if modification order edges were added; false otherwise
1606  */
template <typename rf_type>
bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	/* NOTE(review): last_sc_write is computed but never read in this
	 * function — looks like dead code; confirm before removing */
	ModelAction *last_sc_write = NULL;
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Skip curr */
			if (act == curr)
				continue;
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				/* Once we reach rf itself and it happens before
				 * curr, everything earlier is already ordered */
				if (act->happens_before(curr))
					break;
				else
					continue;
			}

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					/* act --mo--> rf */
					added = mo_graph->addEdge(act, rf) || added;
				} else {
					/* act is a read: order whatever it read
					 * from (write or promise) before rf */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, promises[i]) || added;

	return added;
}
1703
1704 /**
1705  * Updates the mo_graph with the constraints imposed from the current write.
1706  *
1707  * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
1709  *
1710  * (1) The action is a write => that write must occur before
1711  * the current write
1712  *
1713  * (2) The action is a read => the write that that action read from
1714  * must occur before the current write.
1715  *
1716  * This method also handles two other issues:
1717  *
1718  * (I) Sequential Consistency: Making sure that if the current write is
1719  * seq_cst, that it occurs after the previous seq_cst write.
1720  *
1721  * (II) Sending the write back to non-synchronizing reads.
1722  *
1723  * @param curr The current action. Must be a write.
1724  * @param send_fv A vector for stashing reads to which we may pass our future
1725  * value. If NULL, then don't record any future values.
1726  * @return True if modification order edges were added; false otherwise
1727  */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * take whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL) {
						added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
					} else
						added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */

				/* Only stash a future value if it can't trivially be
				 * ruled out by thin-air / coherence reasoning */
				if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1844
1845 //This procedure uses cohere to prune future values that are
1846 //guaranteed to generate a coherence violation.
1847 //
1848 //need to see if there is (1) a promise for thread write, (2)
1849 //the promise is sb before write, (3) the promise can only be
1850 //resolved by the thread read, and (4) the promise has same
1851 //location as read/write
1852
bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises newest-first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* Condition (4): promise must be at the same location */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						bool okay=false;
						/* Walk up the exploration tree from 'read' toward
						 * 'prreader' via node parents; assumes prreader is
						 * an ancestor of read in the node stack — TODO
						 * confirm, otherwise this walk could miss */
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					/* Future value would guarantee a coherence
					 * violation — prune it */
					return false;
				}
			}
		}
	}
	return true;
}
1887
1888
1889 /** Arbitrary reads from the future are not allowed.  Section 29.3
 * part 9 places some constraints.  This method checks one result of that
 * constraint.  Others require compiler support. */
1892 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1893 {
1894         if (!writer->is_rmw())
1895                 return true;
1896
1897         if (!reader->is_rmw())
1898                 return true;
1899
1900         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1901                 if (search == reader)
1902                         return false;
1903                 if (search->get_tid() == reader->get_tid() &&
1904                                 search->happens_before(reader))
1905                         break;
1906         }
1907
1908         return true;
1909 }
1910
1911 /**
1912  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks the following constraints (others
1914  * require compiler support):
1915  *
1916  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1917  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1918  */
1919 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1920 {
1921         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1922         unsigned int i;
1923         /* Iterate over all threads */
1924         for (i = 0; i < thrd_lists->size(); i++) {
1925                 const ModelAction *write_after_read = NULL;
1926
1927                 /* Iterate over actions in thread, starting from most recent */
1928                 action_list_t *list = &(*thrd_lists)[i];
1929                 action_list_t::reverse_iterator rit;
1930                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1931                         ModelAction *act = *rit;
1932
1933                         /* Don't disallow due to act == reader */
1934                         if (!reader->happens_before(act) || reader == act)
1935                                 break;
1936                         else if (act->is_write())
1937                                 write_after_read = act;
1938                         else if (act->is_read() && act->get_reads_from() != NULL)
1939                                 write_after_read = act->get_reads_from();
1940                 }
1941
1942                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1943                         return false;
1944         }
1945         return true;
1946 }
1947
1948 /**
1949  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1950  * The ModelAction under consideration is expected to be taking part in
1951  * release/acquire synchronization as an object of the "reads from" relation.
1952  * Note that this can only provide release sequence support for RMW chains
1953  * which do not read from the future, as those actions cannot be traced until
1954  * their "promise" is fulfilled. Similarly, we may not even establish the
1955  * presence of a release sequence with certainty, as some modification order
1956  * constraints may be decided further in the future. Thus, this function
1957  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1958  * and a boolean representing certainty.
1959  *
1960  * @param rf The action that might be part of a release sequence. Must be a
1961  * write.
1962  * @param release_heads A pass-by-reference style return parameter. After
1963  * execution of this function, release_heads will contain the heads of all the
1964  * relevant release sequences, if any exists with certainty
1965  * @param pending A pass-by-reference style return parameter which is only used
1966  * when returning false (i.e., uncertain). Returns most information regarding
1967  * an uncertain release sequence, including any write operations that might
1968  * break the sequence.
1969  * @return true, if the ModelExecution is certain that release_heads is complete;
1970  * false otherwise
1971  */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk back through the RMW chain that produced 'rf' */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check that no other thread's write can slot in between 'release'
	 * and 'rf' in modification order, which would break the sequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* Thread can produce no more relevant writes if its last
		 * action is already ordered after rf, or it can't run */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record what must be re-checked once more ordering is known */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2115
2116 /**
2117  * An interface for getting the release sequence head(s) with which a
2118  * given ModelAction must synchronize. This function only returns a non-empty
2119  * result when it can locate a release sequence head with certainty. Otherwise,
2120  * it may mark the internal state of the ModelExecution so that it will handle
2121  * the release sequence at a later time, causing @a acquire to update its
2122  * synchronization at some later point in execution.
2123  *
2124  * @param acquire The 'acquire' action that may synchronize with a release
2125  * sequence
2126  * @param read The read action that may read from a release sequence; this may
2127  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2128  * when 'acquire' is a fence-acquire)
2129  * @param release_heads A pass-by-reference return parameter. Will be filled
 * with the head(s) of the release sequence(s), if they exist with certainty.
2131  * @see ModelExecution::release_seq_heads
2132  */
2133 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2134                 ModelAction *read, rel_heads_list_t *release_heads)
2135 {
2136         const ModelAction *rf = read->get_reads_from();
2137         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2138         sequence->acquire = acquire;
2139         sequence->read = read;
2140
2141         if (!release_seq_heads(rf, release_heads, sequence)) {
2142                 /* add act to 'lazy checking' list */
2143                 pending_rel_seqs.push_back(sequence);
2144         } else {
2145                 snapshot_free(sequence);
2146         }
2147 }
2148
2149 /**
2150  * @brief Propagate a modified clock vector to actions later in the execution
2151  * order
2152  *
2153  * After an acquire operation lazily completes a release-sequence
2154  * synchronization, we must update all clock vectors for operations later than
2155  * the acquire in the execution order.
2156  *
2157  * @param acquire The ModelAction whose clock vector must be propagated
2158  * @param work The work queue to which we can add work items, if this
2159  * propagation triggers more updates (e.g., to the modification order)
2160  */
2161 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2162 {
2163         /* Re-check all pending release sequences */
2164         work->push_back(CheckRelSeqWorkEntry(NULL));
2165         /* Re-check read-acquire for mo_graph edges */
2166         work->push_back(MOEdgeWorkEntry(acquire));
2167
2168         /* propagate synchronization to later actions */
2169         action_list_t::reverse_iterator rit = action_trace.rbegin();
2170         for (; (*rit) != acquire; rit++) {
2171                 ModelAction *propagate = *rit;
2172                 if (acquire->happens_before(propagate)) {
2173                         synchronize(acquire, propagate);
2174                         /* Re-check 'propagate' for mo_graph edges */
2175                         work->push_back(MOEdgeWorkEntry(propagate));
2176                 }
2177         }
2178 }
2179
2180 /**
2181  * Attempt to resolve all stashed operations that might synchronize with a
2182  * release sequence for a given location. This implements the "lazy" portion of
2183  * determining whether or not a release sequence was contiguous, since not all
2184  * modification order information is present at the time an action occurs.
2185  *
2186  * @param location The location/object that should be checked for release
2187  * sequence resolutions. A NULL value means to check all locations.
2188  * @param work_queue The work queue to which to add work items as they are
2189  * generated
2190  * @return True if any updates occurred (new synchronization, new mo_graph
2191  * edges)
2192  */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	/* Iterate manually: completed sequences are erased mid-traversal */
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-evaluate the release sequence against the (possibly updated)
		 * modification order; 'complete' means the answer is now certain */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with any newly-certain release heads */
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		if (updated) {
			/* Propagate the changed clock vector
			 * NOTE(review): 'updated' is cumulative across iterations, so
			 * once any sequence synchronizes, every subsequent acquire in
			 * this loop is propagated too — confirm this is intentional */
			propagate_clockvector(acquire, work_queue);
		}
		if (complete) {
			/* Certain result: stop tracking this pending sequence */
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2234
2235 /**
2236  * Performs various bookkeeping operations for the current ModelAction. For
2237  * instance, adds action to the per-object, per-thread action vector and to the
2238  * action trace list of all thread actions.
2239  *
2240  * @param act is the ModelAction to add.
2241  */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	/* Per-location list of all actions on this object */
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First action on an atomic object: prepend a synthetic UNINIT write
	 * so that later reads always have a store to read from */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	/* Global trace of all actions, in execution order; the UNINIT action
	 * is placed at the very front so it precedes everything */
	action_trace.push_back(act);
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-location, per-thread action lists */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Track the most recent action per thread */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Track the most recent release fence per thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	/* A wait operates on a mutex as well, so index it under the mutex
	 * location too.
	 * NOTE(review): the mutex address appears to be stashed in the
	 * action's value field — confirm against the wait construction site */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2288
2289 /**
2290  * @brief Get the last action performed by a particular Thread
2291  * @param tid The thread ID of the Thread in question
2292  * @return The last action in the thread
2293  */
2294 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2295 {
2296         int threadid = id_to_int(tid);
2297         if (threadid < (int)thrd_last_action.size())
2298                 return thrd_last_action[id_to_int(tid)];
2299         else
2300                 return NULL;
2301 }
2302
2303 /**
2304  * @brief Get the last fence release performed by a particular Thread
2305  * @param tid The thread ID of the Thread in question
2306  * @return The last fence release in the thread, if one exists; NULL otherwise
2307  */
2308 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2309 {
2310         int threadid = id_to_int(tid);
2311         if (threadid < (int)thrd_last_fence_release.size())
2312                 return thrd_last_fence_release[id_to_int(tid)];
2313         else
2314                 return NULL;
2315 }
2316
2317 /**
2318  * Gets the last memory_order_seq_cst write (in the total global sequence)
2319  * performed on a particular object (i.e., memory location), not including the
2320  * current action.
2321  * @param curr The current ModelAction; also denotes the object location to
2322  * check
2323  * @return The last seq_cst write
2324  */
2325 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2326 {
2327         void *location = curr->get_location();
2328         action_list_t *list = obj_map.get(location);
2329         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2330         action_list_t::reverse_iterator rit;
2331         for (rit = list->rbegin(); (*rit) != curr; rit++)
2332                 ;
2333         rit++; /* Skip past curr */
2334         for ( ; rit != list->rend(); rit++)
2335                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2336                         return *rit;
2337         return NULL;
2338 }
2339
2340 /**
2341  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2342  * performed in a particular thread, prior to a particular fence.
2343  * @param tid The ID of the thread to check
2344  * @param before_fence The fence from which to begin the search; if NULL, then
2345  * search for the most recent fence in the thread.
2346  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2347  */
2348 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2349 {
2350         /* All fences should have location FENCE_LOCATION */
2351         action_list_t *list = obj_map.get(FENCE_LOCATION);
2352
2353         if (!list)
2354                 return NULL;
2355
2356         action_list_t::reverse_iterator rit = list->rbegin();
2357
2358         if (before_fence) {
2359                 for (; rit != list->rend(); rit++)
2360                         if (*rit == before_fence)
2361                                 break;
2362
2363                 ASSERT(*rit == before_fence);
2364                 rit++;
2365         }
2366
2367         for (; rit != list->rend(); rit++)
2368                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2369                         return *rit;
2370         return NULL;
2371 }
2372
2373 /**
2374  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2375  * location). This function identifies the mutex according to the current
2376  * action, which is presumed to perform on the same mutex.
2377  * @param curr The current ModelAction; also denotes the object location to
2378  * check
2379  * @return The last unlock operation
2380  */
2381 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2382 {
2383         void *location = curr->get_location();
2384
2385         action_list_t *list = obj_map.get(location);
2386         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2387         action_list_t::reverse_iterator rit;
2388         for (rit = list->rbegin(); rit != list->rend(); rit++)
2389                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2390                         return *rit;
2391         return NULL;
2392 }
2393
2394 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2395 {
2396         ModelAction *parent = get_last_action(tid);
2397         if (!parent)
2398                 parent = get_thread(tid)->get_creation();
2399         return parent;
2400 }
2401
2402 /**
2403  * Returns the clock vector for a given thread.
2404  * @param tid The thread whose clock vector we want
2405  * @return Desired clock vector
2406  */
2407 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2408 {
2409         return get_parent_action(tid)->get_cv();
2410 }
2411
2412 /**
2413  * @brief Find the promise (if any) to resolve for the current action and
2414  * remove it from the pending promise vector
2415  * @param curr The current ModelAction. Should be a write.
2416  * @return The Promise to resolve, if any; otherwise NULL
2417  */
2418 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2419 {
2420         for (unsigned int i = 0; i < promises.size(); i++)
2421                 if (curr->get_node()->get_promise(i)) {
2422                         Promise *ret = promises[i];
2423                         promises.erase(promises.begin() + i);
2424                         return ret;
2425                 }
2426         return NULL;
2427 }
2428
2429 /**
2430  * Resolve a Promise with a current write.
2431  * @param write The ModelAction that is fulfilling Promises
2432  * @param promise The Promise to resolve
2433  * @param work The work queue, for adding new fixup work
2434  * @return True if the Promise was successfully resolved; false otherwise
2435  */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
		work_queue_t *work)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Point every reader of this promise at the fulfilling write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		}
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	/* NOTE(review): returns true even when mo_graph->resolvePromise()
	 * failed (the failure is reported via hard_failed_promise instead) */
	return true;
}
2474
2475 /**
2476  * Compute the set of promises that could potentially be satisfied by this
2477  * action. Note that the set computation actually appears in the Node, not in
2478  * ModelExecution.
2479  * @param curr The ModelAction that may satisfy promises
2480  */
2481 void ModelExecution::compute_promises(ModelAction *curr)
2482 {
2483         for (unsigned int i = 0; i < promises.size(); i++) {
2484                 Promise *promise = promises[i];
2485                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2486                         continue;
2487
2488                 bool satisfy = true;
2489                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2490                         const ModelAction *act = promise->get_reader(j);
2491                         if (act->happens_before(curr) ||
2492                                         act->could_synchronize_with(curr)) {
2493                                 satisfy = false;
2494                                 break;
2495                         }
2496                 }
2497                 if (satisfy)
2498                         curr->get_node()->set_promise(i);
2499         }
2500 }
2501
2502 /** Checks promises in response to change in ClockVector Threads. */
2503 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2504 {
2505         for (unsigned int i = 0; i < promises.size(); i++) {
2506                 Promise *promise = promises[i];
2507                 if (!promise->thread_is_available(tid))
2508                         continue;
2509                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2510                         const ModelAction *act = promise->get_reader(j);
2511                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2512                                         merge_cv->synchronized_since(act)) {
2513                                 if (promise->eliminate_thread(tid)) {
2514                                         /* Promise has failed */
2515                                         priv->failed_promise = true;
2516                                         return;
2517                                 }
2518                         }
2519                 }
2520         }
2521 }
2522
2523 void ModelExecution::check_promises_thread_disabled()
2524 {
2525         for (unsigned int i = 0; i < promises.size(); i++) {
2526                 Promise *promise = promises[i];
2527                 if (promise->has_failed()) {
2528                         priv->failed_promise = true;
2529                         return;
2530                 }
2531         }
2532 }
2533
2534 /**
2535  * @brief Checks promises in response to addition to modification order for
2536  * threads.
2537  *
2538  * We test whether threads are still available for satisfying promises after an
2539  * addition to our modification order constraints. Those that are unavailable
2540  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2541  * that promise has failed.
2542  *
2543  * @param act The ModelAction which updated the modification order
2544  * @param is_read_check Should be true if act is a read and we must check for
2545  * updates to the store from which it read (there is a distinction here for
2546  * RMW's, which are both a load and a store)
2547  */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read-check, test the store the read observed; otherwise act
	 * itself is the write under consideration */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* If any reader of the promise happens-before act, check whether
		 * 'write' can no longer satisfy the promise */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
			       continue;
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
			/* One qualifying reader suffices for this promise */
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* If the promise is mo-ordered before 'write', the write may have
		 * eliminated the promise's remaining satisfying threads */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
		}
	}
}
2582
2583 /**
2584  * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
2586  * calculations.
2587  *
2588  * @param curr The current ModelAction. Must be a release sequence fixup
2589  * action.
2590  */
2591 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2592 {
2593         if (pending_rel_seqs.empty())
2594                 return;
2595
2596         struct release_seq *pending = pending_rel_seqs.back();
2597         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2598                 const ModelAction *write = pending->writes[i];
2599                 curr->get_node()->add_relseq_break(write);
2600         }
2601
2602         /* NULL means don't break the sequence; just synchronize */
2603         curr->get_node()->add_relseq_break(NULL);
2604 }
2605
2606 /**
2607  * Build up an initial set of all past writes that this 'read' action may read
2608  * from, as well as any previously-observed future values that must still be valid.
2609  *
2610  * @param curr is the current ModelAction that we are exploring; it must be a
2611  * 'read' operation.
2612  */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	/* Per-thread action lists for curr's location */
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* seq_cst reads are restricted with respect to the last sc write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: trial-run the mo update and
				 * roll it back regardless of the outcome */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values (same trial/rollback) */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2687
2688 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2689 {
2690         for ( ; write != NULL; write = write->get_reads_from()) {
2691                 /* UNINIT actions don't have a Node, and they never sleep */
2692                 if (write->is_uninitialized())
2693                         return true;
2694                 Node *prevnode = write->get_node()->get_parent();
2695
2696                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2697                 if (write->is_release() && thread_sleep)
2698                         return true;
2699                 if (!write->is_rmw())
2700                         return false;
2701         }
2702         return true;
2703 }
2704
2705 /**
2706  * @brief Get an action representing an uninitialized atomic
2707  *
2708  * This function may create a new one or try to retrieve one from the NodeStack
2709  *
2710  * @param curr The current action, which prompts the creation of an UNINIT action
2711  * @return A pointer to the UNINIT ModelAction
2712  */
2713 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2714 {
2715         Node *node = curr->get_node();
2716         ModelAction *act = node->get_uninit_action();
2717         if (!act) {
2718                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2719                 node->set_uninit_action(act);
2720         }
2721         act->create_cv(NULL);
2722         return act;
2723 }
2724
2725 static void print_list(const action_list_t *list)
2726 {
2727         action_list_t::const_iterator it;
2728
2729         model_print("------------------------------------------------------------------------------------\n");
2730         model_print("#    t     Action type     MO       Location         Value               Rf  CV\n");
2731         model_print("------------------------------------------------------------------------------------\n");
2732
2733         unsigned int hash = 0;
2734
2735         for (it = list->begin(); it != list->end(); it++) {
2736                 const ModelAction *act = *it;
2737                 if (act->get_seq_number() > 0)
2738                         act->print();
2739                 hash = hash^(hash<<3)^((*it)->hash());
2740         }
2741         model_print("HASH %u\n", hash);
2742         model_print("------------------------------------------------------------------------------------\n");
2743 }
2744
2745 #if SUPPORT_MOD_ORDER_DUMP
2746 void ModelExecution::dumpGraph(char *filename) const
2747 {
2748         char buffer[200];
2749         sprintf(buffer, "%s.dot", filename);
2750         FILE *file = fopen(buffer, "w");
2751         fprintf(file, "digraph %s {\n", filename);
2752         mo_graph->dumpNodes(file);
2753         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2754
2755         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2756                 ModelAction *act = *it;
2757                 if (act->is_read()) {
2758                         mo_graph->dot_print_node(file, act);
2759                         if (act->get_reads_from())
2760                                 mo_graph->dot_print_edge(file,
2761                                                 act->get_reads_from(),
2762                                                 act,
2763                                                 "label=\"rf\", color=red, weight=2");
2764                         else
2765                                 mo_graph->dot_print_edge(file,
2766                                                 act->get_reads_from_promise(),
2767                                                 act,
2768                                                 "label=\"rf\", color=red");
2769                 }
2770                 if (thread_array[act->get_tid()]) {
2771                         mo_graph->dot_print_edge(file,
2772                                         thread_array[id_to_int(act->get_tid())],
2773                                         act,
2774                                         "label=\"sb\", color=blue, weight=400");
2775                 }
2776
2777                 thread_array[act->get_tid()] = act;
2778         }
2779         fprintf(file, "}\n");
2780         model_free(thread_array);
2781         fclose(file);
2782 }
2783 #endif
2784
2785 /** @brief Prints an execution trace summary. */
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump mo_graph snapshots, numbered per execution */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	/* One-line verdict for this execution */
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	/* Full action table */
	print_list(&action_trace);
	model_print("\n");

	/* Any promises still outstanding at the end of the trace */
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2820
2821 /**
2822  * Add a Thread to the system for the first time. Should only be called once
2823  * per thread.
2824  * @param t The Thread to add
2825  */
2826 void ModelExecution::add_thread(Thread *t)
2827 {
2828         unsigned int i = id_to_int(t->get_id());
2829         if (i >= thread_map.size())
2830                 thread_map.resize(i + 1);
2831         thread_map[i] = t;
2832         if (!t->is_model_thread())
2833                 scheduler->add_thread(t);
2834 }
2835
2836 /**
2837  * @brief Get a Thread reference by its ID
2838  * @param tid The Thread's ID
2839  * @return A Thread reference
2840  */
2841 Thread * ModelExecution::get_thread(thread_id_t tid) const
2842 {
2843         unsigned int i = id_to_int(tid);
2844         if (i < thread_map.size())
2845                 return thread_map[i];
2846         return NULL;
2847 }
2848
2849 /**
2850  * @brief Get a reference to the Thread in which a ModelAction was executed
2851  * @param act The ModelAction
2852  * @return A Thread reference
2853  */
2854 Thread * ModelExecution::get_thread(const ModelAction *act) const
2855 {
2856         return get_thread(act->get_tid());
2857 }
2858
2859 /**
2860  * @brief Get a Thread reference by its pthread ID
 * @param pid The pthread's ID
2862  * @return A Thread reference
2863  */
2864 Thread * ModelExecution::get_pthread(pthread_t pid) {
2865         if (pid < pthread_counter + 1) return pthread_map[pid];
2866         else return NULL;
2867 }
2868
2869 /**
2870  * @brief Get a Promise's "promise number"
2871  *
2872  * A "promise number" is an index number that is unique to a promise, valid
2873  * only for a specific snapshot of an execution trace. Promises may come and go
2874  * as they are generated an resolved, so an index only retains meaning for the
2875  * current snapshot.
2876  *
2877  * @param promise The Promise to check
2878  * @return The promise index, if the promise still is valid; otherwise -1
2879  */
2880 int ModelExecution::get_promise_number(const Promise *promise) const
2881 {
2882         for (unsigned int i = 0; i < promises.size(); i++)
2883                 if (promises[i] == promise)
2884                         return i;
2885         /* Not found */
2886         return -1;
2887 }
2888
2889 /**
2890  * @brief Check if a Thread is currently enabled
2891  * @param t The Thread to check
2892  * @return True if the Thread is currently enabled
2893  */
2894 bool ModelExecution::is_enabled(Thread *t) const
2895 {
2896         return scheduler->is_enabled(t);
2897 }
2898
2899 /**
2900  * @brief Check if a Thread is currently enabled
2901  * @param tid The ID of the Thread to check
2902  * @return True if the Thread is currently enabled
2903  */
2904 bool ModelExecution::is_enabled(thread_id_t tid) const
2905 {
2906         return scheduler->is_enabled(tid);
2907 }
2908
2909 /**
2910  * @brief Select the next thread to execute based on the curren action
2911  *
2912  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2913  * actions should be followed by the execution of their child thread. In either
2914  * case, the current action should determine the next thread schedule.
2915  *
2916  * @param curr The current action
2917  * @return The next thread to run, if the current action will determine this
2918  * selection; otherwise NULL
2919  */
2920 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2921 {
2922         /* Do not split atomic RMW */
2923         if (curr->is_rmwr())
2924                 return get_thread(curr);
2925         if (curr->is_write()) {
2926 //              std::memory_order order = curr->get_mo(); 
2927 //              switch(order) {
2928 //                      case std::memory_order_relaxed: 
2929 //                              return get_thread(curr);
2930 //                      case std::memory_order_release:
2931 //                              return get_thread(curr);
2932 //                      defalut:
2933 //                              return NULL;
2934 //              }       
2935                 return NULL;
2936         }
2937
2938         /* Follow CREATE with the created thread */
2939         /* which is not needed, because model.cc takes care of this */
2940         if (curr->get_type() == THREAD_CREATE)
2941                 return curr->get_thread_operand(); 
2942         if (curr->get_type() == PTHREAD_CREATE) {
2943                 return curr->get_thread_operand();
2944         }
2945         return NULL;
2946 }
2947
2948 /** @return True if the execution has taken too many steps */
2949 bool ModelExecution::too_many_steps() const
2950 {
2951         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2952 }
2953
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return Returns the next Thread to run, if any; NULL if this execution
 * should terminate
 */
Thread * ModelExecution::take_step(ModelAction *curr)
{
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	/* NOTE(review): check_action_enabled() is called inside ASSERT and is
	 * flagged as possibly side-effecting — confirm it is safe under a
	 * no-op ASSERT build before changing this */
	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* check_current_action() may return a different ModelAction than the
	 * one passed in (e.g., a previously-explored equivalent); use its
	 * result from here on */
	curr = check_current_action(curr);
	ASSERT(curr);

	/* A blocked or finished thread must leave the scheduler's run set */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	return action_select_next_thread(curr);
}
2974
2975 /**
2976  * Launch end-of-execution release sequence fixups only when
2977  * the execution is otherwise feasible AND there are:
2978  *
2979  * (1) pending release sequences
2980  * (2) pending assertions that could be invalidated by a change
2981  * in clock vectors (i.e., data races)
2982  * (3) no pending promises
2983  */
2984 void ModelExecution::fixup_release_sequences()
2985 {
2986         while (!pending_rel_seqs.empty() &&
2987                         is_feasible_prefix_ignore_relseq() &&
2988                         haveUnrealizedRaces()) {
2989                 model_print("*** WARNING: release sequence fixup action "
2990                                 "(%zu pending release seuqence(s)) ***\n",
2991                                 pending_rel_seqs.size());
2992                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2993                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2994                                 model_thread);
2995                 take_step(fixup);
2996         };
2997 }