add support for pthread_create (in progress)
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 * (i.e., saved/restored across execution replays by the snapshotting
 * allocator -- see SNAPSHOTALLOC).
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	/* Release all heap-allocated bug reports; this struct owns them */
	~model_snapshot_members() {
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID that will be assigned to the next created Thread */
	unsigned int next_thread_id;
	/** @brief Highest sequence number handed out so far */
	modelclock_t used_sequence_numbers;
	/** @brief Most recent cached backtracking point (NULL if none) */
	ModelAction *next_backtrack;
	/** @brief Bug reports recorded for this execution (owned; freed in dtor) */
	SnapVector<bug_message *> bugs;
	/** @brief A promise could not be kept (set in process_read) */
	bool failed_promise;
	/* NOTE(review): distinct from failed_promise; exact trigger not visible
	 * in this file -- presumably an unrecoverable promise failure */
	bool hard_failed_promise;
	/** @brief A read had its read-from set pruned too aggressively */
	bool too_many_reads;
	/* NOTE(review): set elsewhere; presumably no feasible write to read from */
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief A bad seq-cst read was observed (see set_bad_sc_read()) */
	bool bad_sc_read;
	/** @brief A trace assertion fired; this execution should halt */
	bool asserted;

	SNAPSHOTALLOC
};
62
/**
 * @brief Constructor
 * @param m The owning ModelChecker (not deleted by this class)
 * @param params Model-checking parameters (not deleted by this class)
 * @param scheduler The Scheduler that drives thread selection
 * @param node_stack The NodeStack recording exploration/backtracking state
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),	/* deleted in ~ModelExecution */
	mo_graph(new CycleGraph())			/* deleted in ~ModelExecution */
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());       // L: Create model thread
	add_thread(model_thread);                       // L: Add model thread to scheduler
	/* Let the scheduler and node stack call back into this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
91
92 /** @brief Destructor */
93 ModelExecution::~ModelExecution()
94 {
95         for (unsigned int i = 0; i < get_num_threads(); i++)
96                 delete get_thread(int_to_id(i));
97
98         for (unsigned int i = 0; i < promises.size(); i++)
99                 delete promises[i];
100
101         delete mo_graph;
102         delete priv;
103 }
104
/** @brief Forward the current execution number query to the owning
 *  ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
109
110 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
111 {
112         action_list_t *tmp = hash->get(ptr);
113         if (tmp == NULL) {
114                 tmp = new action_list_t();
115                 hash->put(ptr, tmp);
116         }
117         return tmp;
118 }
119
120 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
121 {
122         SnapVector<action_list_t> *tmp = hash->get(ptr);
123         if (tmp == NULL) {
124                 tmp = new SnapVector<action_list_t>();
125                 hash->put(ptr, tmp);
126         }
127         return tmp;
128 }
129
130 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
131 {
132         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
133         if (wrv==NULL)
134                 return NULL;
135         unsigned int thread=id_to_int(tid);
136         if (thread < wrv->size())
137                 return &(*wrv)[thread];
138         else
139                 return NULL;
140 }
141
142 /** @return a thread ID for a new Thread */
143 thread_id_t ModelExecution::get_next_id()
144 {
145         return priv->next_thread_id++;
146 }
147
/** @return the number of user threads created during this execution
 *
 * NOTE(review): this returns next_thread_id, which counts every ID handed
 * out -- including the model-checker thread created in the constructor --
 * so "user threads" in the original comment is approximate. */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}
153
154 /** @return a sequence number for a new ModelAction */
155 modelclock_t ModelExecution::get_next_seq_num()
156 {
157         return ++priv->used_sequence_numbers;
158 }
159
160 /**
161  * @brief Should the current action wake up a given thread?
162  *
163  * @param curr The current action
164  * @param thread The thread that we might wake up
165  * @return True, if we should wake up the sleeping thread; false otherwise
166  */
167 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
168 {
169         const ModelAction *asleep = thread->get_pending();
170         /* Don't allow partial RMW to wake anyone up */
171         if (curr->is_rmwr())
172                 return false;
173         /* Synchronizing actions may have been backtracked */
174         if (asleep->could_synchronize_with(curr))
175                 return true;
176         /* All acquire/release fences and fence-acquire/store-release */
177         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
178                 return true;
179         /* Fence-release + store can awake load-acquire on the same location */
180         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
181                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
182                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
183                         return true;
184         }
185         return false;
186 }
187
188 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
189 {
190         for (unsigned int i = 0; i < get_num_threads(); i++) {
191                 Thread *thr = get_thread(int_to_id(i));
192                 if (scheduler->is_sleep_set(thr)) {
193                         if (should_wake_up(curr, thr))
194                                 /* Remove this thread from sleep set */
195                                 scheduler->remove_sleep(thr);
196                 }
197         }
198 }
199
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
206
/** @brief Alert the model-checker that a bad seq-cst read was observed
 *
 * (The original comment here was copy-pasted from set_bad_synchronization();
 * this setter flags the bad_sc_read condition instead.) */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
213
214 bool ModelExecution::assert_bug(const char *msg)
215 {
216         priv->bugs.push_back(new bug_message(msg));
217
218         if (isfeasibleprefix()) {
219                 set_assert();
220                 return true;
221         }
222         return false;
223 }
224
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}
230
/** @return the list of bug reports recorded so far (owned by priv; do not
 *  free the elements) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
235
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
246
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
256
257 /**
258  * Check if we are in a deadlock. Should only be called at the end of an
259  * execution, although it should not give false positives in the middle of an
260  * execution (there should be some ENABLED thread).
261  *
262  * @return True if program is in a deadlock; false otherwise
263  */
264 bool ModelExecution::is_deadlocked() const
265 {
266         bool blocking_threads = false;
267         for (unsigned int i = 0; i < get_num_threads(); i++) {
268                 thread_id_t tid = int_to_id(i);
269                 if (is_enabled(tid))
270                         return false;
271                 Thread *t = get_thread(tid);
272                 if (!t->is_model_thread() && t->get_pending())
273                         blocking_threads = true;
274         }
275         return blocking_threads;
276 }
277
278 /**
279  * @brief Check if we are yield-blocked
280  *
281  * A program can be "yield-blocked" if all threads are ready to execute a
282  * yield.
283  *
284  * @return True if the program is yield-blocked; false otherwise
285  */
286 bool ModelExecution::is_yieldblocked() const
287 {
288         if (!params->yieldblock)
289                 return false;
290
291         for (unsigned int i = 0; i < get_num_threads(); i++) {
292                 thread_id_t tid = int_to_id(i);
293                 Thread *t = get_thread(tid);
294                 if (t->get_pending() && t->get_pending()->is_yield())
295                         return true;
296         }
297         return false;
298 }
299
300 /**
301  * Check if this is a complete execution. That is, have all thread completed
302  * execution (rather than exiting because sleep sets have forced a redundant
303  * execution).
304  *
305  * @return True if the execution is complete.
306  */
307 bool ModelExecution::is_complete_execution() const
308 {
309         if (is_yieldblocked())
310                 return false;
311         for (unsigned int i = 0; i < get_num_threads(); i++)
312                 if (is_enabled(int_to_id(i)))
313                         return false;
314         return true;
315 }
316
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: walk the trace backwards until we reach it,
	 * so the subsequent scan only covers actions before the release */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread slots: latest acquire-fence seen and the latest plain
	 * load of act's location (indexed by thread ID) */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions cannot form a fence conflict with act */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the first (most recent) acquire-fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Choose the latest acquire-fence among threads that also performed a
	 * prior load of act's location (load --sb-> fence-acquire pattern) */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
389
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primarily includes pairs
 * of synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act, or NULL if none
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* fall through: seq-cst fences conflict like atomic accesses */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return the later of
		 * the two candidates */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			/* A wait also conflicts with another thread's notify */
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
489
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	/* No conflicting action means no backtracking is needed */
	if (prev == NULL)
		return;

	/* Backtracking is recorded at the exploration node *preceding* the
	 * conflicting action */
	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point, only that thread
	 * needs to be considered; otherwise consider every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			/* NOTE: inner loop variable `t` shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			/* NOTE: inner loop variable `t` shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
573
574 /**
575  * @brief Cache the a backtracking point as the "most recent", if eligible
576  *
577  * Note that this does not prepare the NodeStack for this backtracking
578  * operation, it only caches the action on a per-execution basis
579  *
580  * @param act The operation at which we should explore a different next action
581  * (i.e., backtracking point)
582  * @return True, if this action is now the most recent backtracking point;
583  * false otherwise
584  */
585 bool ModelExecution::set_latest_backtrack(ModelAction *act)
586 {
587         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
588                 priv->next_backtrack = act;
589                 return true;
590         }
591         return false;
592 }
593
594 /**
595  * Returns last backtracking point. The model checker will explore a different
596  * path for this point in the next execution.
597  * @return The ModelAction at which the next execution should diverge.
598  */
599 ModelAction * ModelExecution::get_next_backtrack()
600 {
601         ModelAction *next = priv->next_backtrack;
602         priv->next_backtrack = NULL;
603         return next;
604 }
605
/**
 * Processes a read model action.
 *
 * Dispatches on the Node's read-from status: reading from a past write,
 * from an existing Promise, or from a speculative future value (which
 * creates a new Promise). All modification-order updates are staged via
 * mo_graph->startChanges()/commitChanges() so they can be rolled back when
 * a candidate read-from choice is rejected.
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loop so a rejected read-from-past candidate can be retried after
	 * incrementing to the next candidate */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				/* Candidate fails the recency check: try the
				 * next one if any remain, otherwise flag the
				 * execution */
				if (node->increment_read_from()) {
					mo_graph->rollbackChanges();
					continue;
				} else {
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			/* add_reader() returning nonzero indicates failure */
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a fresh Promise that a later write must resolve */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Propagate the value read to the thread as its return value */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
669
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			/* Failed trylock returns 0 and does nothing else */
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Detect use of the lock before its initialization, via the
		 * clock vector vs. the mutex's allocation clock */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter on this location */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The node's misc counter selects which waiter to wake */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
765
766 /**
767  * @brief Check if the current pending promises allow a future value to be sent
768  *
769  * It is unsafe to pass a future value back if there exists a pending promise Pr
770  * such that:
771  *
772  *    reader --exec-> Pr --exec-> writer
773  *
774  * If such Pr exists, we must save the pending future value until Pr is
775  * resolved.
776  *
777  * @param writer The operation which sends the future value. Must be a write.
778  * @param reader The operation which will observe the value. Must be a read.
779  * @return True if the future value can be sent now; false if it must wait.
780  */
781 bool ModelExecution::promises_may_allow(const ModelAction *writer,
782                 const ModelAction *reader) const
783 {
784         for (int i = promises.size() - 1; i >= 0; i--) {
785                 ModelAction *pr = promises[i]->get_reader(0);
786                 //reader is after promise...doesn't cross any promise
787                 if (*reader > *pr)
788                         return true;
789                 //writer is after promise, reader before...bad...
790                 if (*writer > *pr)
791                         return false;
792         }
793         return true;
794 }
795
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader
	 * (the writer's thread may not have been created yet at that node) */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Future value: the written value, an expiration clock padded by
	 * maxfuturedelay, and the (ancestor) writing thread's ID */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* Only a newly-recorded future value creates a backtracking point */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
826
827 /**
828  * Process a write ModelAction
829  * @param curr The ModelAction to process
830  * @param work The work queue, for adding fixup work
831  * @return True if the mo_graph was updated or promises were resolved
832  */
833 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
834 {
835         /* Readers to which we may send our future value */
836         ModelVector<ModelAction *> send_fv;
837
838         const ModelAction *earliest_promise_reader;
839         bool updated_promises = false;
840
841         bool updated_mod_order = w_modification_order(curr, &send_fv);
842         Promise *promise = pop_promise_to_resolve(curr);
843
844         if (promise) {
845                 earliest_promise_reader = promise->get_reader(0);
846                 updated_promises = resolve_promise(curr, promise, work);
847         } else
848                 earliest_promise_reader = NULL;
849
850         for (unsigned int i = 0; i < send_fv.size(); i++) {
851                 ModelAction *read = send_fv[i];
852
853                 /* Don't send future values to reads after the Promise we resolve */
854                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
855                         /* Check if future value can be sent immediately */
856                         if (promises_may_allow(curr, read)) {
857                                 add_future_value(curr, read);
858                         } else {
859                                 futurevalues.push_back(PendingFutureValue(curr, read));
860                         }
861                 }
862         }
863
864         /* Check the pending future values */
865         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
866                 struct PendingFutureValue pfv = futurevalues[i];
867                 if (promises_may_allow(pfv.writer, pfv.reader)) {
868                         add_future_value(pfv.writer, pfv.reader);
869                         futurevalues.erase(futurevalues.begin() + i);
870                 }
871         }
872
873         mo_graph->commitChanges();
874         mo_check_promises(curr, false);
875
876         get_thread(curr)->set_return_value(VALUE_NONE);
877         return updated_mod_order || updated_promises;
878 }
879
880 /**
881  * Process a fence ModelAction
882  * @param curr The ModelAction to process
883  * @return True if synchronization was updated
884  */
885 bool ModelExecution::process_fence(ModelAction *curr)
886 {
887         /*
888          * fence-relaxed: no-op
889          * fence-release: only log the occurence (not in this function), for
890          *   use in later synchronization
891          * fence-acquire (this function): search for hypothetical release
892          *   sequences
893          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
894          */
895         bool updated = false;
896         if (curr->is_acquire()) {
897                 action_list_t *list = &action_trace;
898                 action_list_t::reverse_iterator rit;
899                 /* Find X : is_read(X) && X --sb-> curr */
900                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
901                         ModelAction *act = *rit;
902                         if (act == curr)
903                                 continue;
904                         if (act->get_tid() != curr->get_tid())
905                                 continue;
906                         /* Stop at the beginning of the thread */
907                         if (act->is_thread_start())
908                                 break;
909                         /* Stop once we reach a prior fence-acquire */
910                         if (act->is_fence() && act->is_acquire())
911                                 break;
912                         if (!act->is_read())
913                                 continue;
914                         /* read-acquire will find its own release sequences */
915                         if (act->is_acquire())
916                                 continue;
917
918                         /* Establish hypothetical release sequences */
919                         rel_heads_list_t release_heads;
920                         get_release_seq_heads(curr, act, &release_heads);
921                         for (unsigned int i = 0; i < release_heads.size(); i++)
922                                 synchronize(release_heads[i], curr);
923                         if (release_heads.size() != 0)
924                                 updated = true;
925                 }
926         }
927         return updated;
928 }
929
930 /**
931  * @brief Process the current action for thread-related activity
932  *
933  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
934  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
935  * synchronization, etc.  This function is a no-op for non-THREAD actions
936  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
937  *
938  * @param curr The current action
939  * @return True if synchronization was updated or a thread completed
940  */
941 bool ModelExecution::process_thread_action(ModelAction *curr)
942 {
943         bool updated = false;
944
945         switch (curr->get_type()) {
946         case THREAD_CREATE: {
947                 thrd_t *thrd = (thrd_t *)curr->get_location();
948                 struct thread_params *params = (struct thread_params *)curr->get_value();
949                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
950                 add_thread(th);
951                 th->set_creation(curr);
952                 /* Promises can be satisfied by children */
953                 for (unsigned int i = 0; i < promises.size(); i++) {
954                         Promise *promise = promises[i];
955                         if (promise->thread_is_available(curr->get_tid()))
956                                 promise->add_thread(th->get_id());
957                 }
958                 break;
959         }
960         case THREAD_JOIN: {
961                 Thread *blocking = curr->get_thread_operand();
962                 ModelAction *act = get_last_action(blocking->get_id());
963                 synchronize(act, curr);
964                 updated = true; /* trigger rel-seq checks */
965                 break;
966         }
967         case THREAD_FINISH: {
968                 Thread *th = get_thread(curr);
969                 /* Wake up any joining threads */
970                 for (unsigned int i = 0; i < get_num_threads(); i++) {
971                         Thread *waiting = get_thread(int_to_id(i));
972                         if (waiting->waiting_on() == th &&
973                                         waiting->get_pending()->is_thread_join())
974                                 scheduler->wake(waiting);
975                 }
976                 th->complete();
977                 /* Completed thread can't satisfy promises */
978                 for (unsigned int i = 0; i < promises.size(); i++) {
979                         Promise *promise = promises[i];
980                         if (promise->thread_is_available(th->get_id()))
981                                 if (promise->eliminate_thread(th->get_id()))
982                                         priv->failed_promise = true;
983                 }
984                 updated = true; /* trigger rel-seq checks */
985                 break;
986         }
987         case THREAD_START: {
988                 check_promises(curr->get_tid(), NULL, curr->get_cv());
989                 break;
990         }
991         default:
992                 break;
993         }
994
995         return updated;
996 }
997
998 /**
999  * @brief Process the current action for release sequence fixup activity
1000  *
1001  * Performs model-checker release sequence fixups for the current action,
1002  * forcing a single pending release sequence to break (with a given, potential
1003  * "loose" write) or to complete (i.e., synchronize). If a pending release
1004  * sequence forms a complete release sequence, then we must perform the fixup
1005  * synchronization, mo_graph additions, etc.
1006  *
1007  * @param curr The current action; must be a release sequence fixup action
1008  * @param work_queue The work queue to which to add work items as they are
1009  * generated
1010  */
1011 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1012 {
1013         const ModelAction *write = curr->get_node()->get_relseq_break();
1014         struct release_seq *sequence = pending_rel_seqs.back();
1015         pending_rel_seqs.pop_back();
1016         ASSERT(sequence);
1017         ModelAction *acquire = sequence->acquire;
1018         const ModelAction *rf = sequence->rf;
1019         const ModelAction *release = sequence->release;
1020         ASSERT(acquire);
1021         ASSERT(release);
1022         ASSERT(rf);
1023         ASSERT(release->same_thread(rf));
1024
1025         if (write == NULL) {
1026                 /**
1027                  * @todo Forcing a synchronization requires that we set
1028                  * modification order constraints. For instance, we can't allow
1029                  * a fixup sequence in which two separate read-acquire
1030                  * operations read from the same sequence, where the first one
1031                  * synchronizes and the other doesn't. Essentially, we can't
1032                  * allow any writes to insert themselves between 'release' and
1033                  * 'rf'
1034                  */
1035
1036                 /* Must synchronize */
1037                 if (!synchronize(release, acquire))
1038                         return;
1039
1040                 /* Propagate the changed clock vector */
1041                 propagate_clockvector(acquire, work_queue);
1042         } else {
1043                 /* Break release sequence with new edges:
1044                  *   release --mo--> write --mo--> rf */
1045                 mo_graph->addEdge(release, write);
1046                 mo_graph->addEdge(write, rf);
1047         }
1048
1049         /* See if we have realized a data race */
1050         checkDataRaces();
1051 }
1052
1053 /**
1054  * Initialize the current action by performing one or more of the following
1055  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1056  * in the NodeStack, manipulating backtracking sets, allocating and
1057  * initializing clock vectors, and computing the promises to fulfill.
1058  *
1059  * @param curr The current action, as passed from the user context; may be
1060  * freed/invalidated after the execution of this function, with a different
1061  * action "returned" its place (pass-by-reference)
1062  * @return True if curr is a newly-explored action; false otherwise
1063  */
1064 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1065 {
1066         ModelAction *newcurr;
1067
1068         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1069                 newcurr = process_rmw(*curr);
1070                 delete *curr;
1071
1072                 if (newcurr->is_rmw())
1073                         compute_promises(newcurr);
1074
1075                 *curr = newcurr;
1076                 return false;
1077         }
1078
1079         (*curr)->set_seq_number(get_next_seq_num());
1080
1081         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1082         if (newcurr) {
1083                 /* First restore type and order in case of RMW operation */
1084                 if ((*curr)->is_rmwr())
1085                         newcurr->copy_typeandorder(*curr);
1086
1087                 ASSERT((*curr)->get_location() == newcurr->get_location());
1088                 newcurr->copy_from_new(*curr);
1089
1090                 /* Discard duplicate ModelAction; use action from NodeStack */
1091                 delete *curr;
1092
1093                 /* Always compute new clock vector */
1094                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1095
1096                 *curr = newcurr;
1097                 return false; /* Action was explored previously */
1098         } else {
1099                 newcurr = *curr;
1100
1101                 /* Always compute new clock vector */
1102                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1103
1104                 /* Assign most recent release fence */
1105                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1106
1107                 /*
1108                  * Perform one-time actions when pushing new ModelAction onto
1109                  * NodeStack
1110                  */
1111                 if (newcurr->is_write())
1112                         compute_promises(newcurr);
1113                 else if (newcurr->is_relseq_fixup())
1114                         compute_relseq_breakwrites(newcurr);
1115                 else if (newcurr->is_wait())
1116                         newcurr->get_node()->set_misc_max(2);
1117                 else if (newcurr->is_notify_one()) {
1118                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1119                 }
1120                 return true; /* This was a new ModelAction */
1121         }
1122 }
1123
1124 /**
1125  * @brief Establish reads-from relation between two actions
1126  *
1127  * Perform basic operations involved with establishing a concrete rf relation,
1128  * including setting the ModelAction data and checking for release sequences.
1129  *
1130  * @param act The action that is reading (must be a read)
1131  * @param rf The action from which we are reading (must be a write)
1132  *
1133  * @return True if this read established synchronization
1134  */
1135
1136 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1137 {
1138         ASSERT(rf);
1139         ASSERT(rf->is_write());
1140
1141         act->set_read_from(rf);
1142         if (act->is_acquire()) {
1143                 rel_heads_list_t release_heads;
1144                 get_release_seq_heads(act, act, &release_heads);
1145                 int num_heads = release_heads.size();
1146                 for (unsigned int i = 0; i < release_heads.size(); i++)
1147                         if (!synchronize(release_heads[i], act))
1148                                 num_heads--;
1149                 return num_heads > 0;
1150         }
1151         return false;
1152 }
1153
1154 /**
1155  * @brief Synchronizes two actions
1156  *
1157  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1158  * This function performs the synchronization as well as providing other hooks
1159  * for other checks along with synchronization.
1160  *
1161  * @param first The left-hand side of the synchronizes-with relation
1162  * @param second The right-hand side of the synchronizes-with relation
1163  * @return True if the synchronization was successful (i.e., was consistent
1164  * with the execution order); false otherwise
1165  */
1166 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1167 {
1168         if (*second < *first) {
1169                 set_bad_synchronization();
1170                 return false;
1171         }
1172         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1173         return second->synchronize_with(first);
1174 }
1175
1176 /**
1177  * Check promises and eliminate potentially-satisfying threads when a thread is
1178  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1179  * no longer satisfy a promise generated from that thread.
1180  *
1181  * @param blocker The thread on which a thread is waiting
1182  * @param waiting The waiting thread
1183  */
1184 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1185 {
1186         for (unsigned int i = 0; i < promises.size(); i++) {
1187                 Promise *promise = promises[i];
1188                 if (!promise->thread_is_available(waiting->get_id()))
1189                         continue;
1190                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1191                         ModelAction *reader = promise->get_reader(j);
1192                         if (reader->get_tid() != blocker->get_id())
1193                                 continue;
1194                         if (promise->eliminate_thread(waiting->get_id())) {
1195                                 /* Promise has failed */
1196                                 priv->failed_promise = true;
1197                         } else {
1198                                 /* Only eliminate the 'waiting' thread once */
1199                                 return;
1200                         }
1201                 }
1202         }
1203 }
1204
1205 /**
1206  * @brief Check whether a model action is enabled.
1207  *
1208  * Checks whether an operation would be successful (i.e., is a lock already
1209  * locked, or is the joined thread already complete).
1210  *
1211  * For yield-blocking, yields are never enabled.
1212  *
1213  * @param curr is the ModelAction to check whether it is enabled.
1214  * @return a bool that indicates whether the action is enabled.
1215  */
1216 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1217         if (curr->is_lock()) {
1218                 std::mutex *lock = curr->get_mutex();
1219                 struct std::mutex_state *state = lock->get_state();
1220                 if (state->locked)
1221                         return false;
1222         } else if (curr->is_thread_join()) {
1223                 Thread *blocking = curr->get_thread_operand();
1224                 if (!blocking->is_complete()) {
1225                         thread_blocking_check_promises(blocking, get_thread(curr));
1226                         return false;
1227                 }
1228         } else if (params->yieldblock && curr->is_yield()) {
1229                 return false;
1230         }
1231
1232         return true;
1233 }
1234
1235 /**
1236  * This is the heart of the model checker routine. It performs model-checking
1237  * actions corresponding to a given "current action." Among other processes, it
1238  * calculates reads-from relationships, updates synchronization clock vectors,
1239  * forms a memory_order constraints graph, and handles replay/backtrack
1240  * execution when running permutations of previously-observed executions.
1241  *
1242  * @param curr The current action to process
1243  * @return The ModelAction that is actually executed; may be different than
1244  * curr
1245  */
1246 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1247 {
1248         ASSERT(curr);
1249         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1250         bool newly_explored = initialize_curr_action(&curr);
1251
1252         DBG();
1253
1254         wake_up_sleeping_actions(curr);
1255
1256         /* Compute fairness information for CHESS yield algorithm */
1257         if (params->yieldon) {
1258                 curr->get_node()->update_yield(scheduler);
1259         }
1260
1261         /* Add the action to lists before any other model-checking tasks */
1262         if (!second_part_of_rmw)
1263                 add_action_to_lists(curr);
1264
1265         /* Build may_read_from set for newly-created actions */
1266         if (newly_explored && curr->is_read())
1267                 build_may_read_from(curr);
1268
1269         /* Initialize work_queue with the "current action" work */
1270         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1271         while (!work_queue.empty() && !has_asserted()) {
1272                 WorkQueueEntry work = work_queue.front();
1273                 work_queue.pop_front();
1274
1275                 switch (work.type) {
1276                 case WORK_CHECK_CURR_ACTION: {
1277                         ModelAction *act = work.action;
1278                         bool update = false; /* update this location's release seq's */
1279                         bool update_all = false; /* update all release seq's */
1280
1281                         if (process_thread_action(curr))
1282                                 update_all = true;
1283
1284                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1285                                 update = true;
1286
1287                         if (act->is_write() && process_write(act, &work_queue))
1288                                 update = true;
1289
1290                         if (act->is_fence() && process_fence(act))
1291                                 update_all = true;
1292
1293                         if (act->is_mutex_op() && process_mutex(act))
1294                                 update_all = true;
1295
1296                         if (act->is_relseq_fixup())
1297                                 process_relseq_fixup(curr, &work_queue);
1298
1299                         if (update_all)
1300                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1301                         else if (update)
1302                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1303                         break;
1304                 }
1305                 case WORK_CHECK_RELEASE_SEQ:
1306                         resolve_release_sequences(work.location, &work_queue);
1307                         break;
1308                 case WORK_CHECK_MO_EDGES: {
1309                         /** @todo Complete verification of work_queue */
1310                         ModelAction *act = work.action;
1311                         bool updated = false;
1312
1313                         if (act->is_read()) {
1314                                 const ModelAction *rf = act->get_reads_from();
1315                                 const Promise *promise = act->get_reads_from_promise();
1316                                 if (rf) {
1317                                         if (r_modification_order(act, rf))
1318                                                 updated = true;
1319                                         if (act->is_seqcst()) {
1320                                                 ModelAction *last_sc_write = get_last_seq_cst_write(act);
1321                                                 if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
1322                                                         set_bad_sc_read();
1323                                                 }
1324                                         }
1325                                 } else if (promise) {
1326                                         if (r_modification_order(act, promise))
1327                                                 updated = true;
1328                                 }
1329                         }
1330                         if (act->is_write()) {
1331                                 if (w_modification_order(act, NULL))
1332                                         updated = true;
1333                         }
1334                         mo_graph->commitChanges();
1335
1336                         if (updated)
1337                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1338                         break;
1339                 }
1340                 default:
1341                         ASSERT(false);
1342                         break;
1343                 }
1344         }
1345
1346         check_curr_backtracking(curr);
1347         set_backtracking(curr);
1348         return curr;
1349 }
1350
1351 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1352 {
1353         Node *currnode = curr->get_node();
1354         Node *parnode = currnode->get_parent();
1355
1356         if ((parnode && !parnode->backtrack_empty()) ||
1357                          !currnode->misc_empty() ||
1358                          !currnode->read_from_empty() ||
1359                          !currnode->promise_empty() ||
1360                          !currnode->relseq_break_empty()) {
1361                 set_latest_backtrack(curr);
1362         }
1363 }
1364
1365 bool ModelExecution::promises_expired() const
1366 {
1367         for (unsigned int i = 0; i < promises.size(); i++) {
1368                 Promise *promise = promises[i];
1369                 if (promise->get_expiration() < priv->used_sequence_numbers)
1370                         return true;
1371         }
1372         return false;
1373 }
1374
1375 /**
1376  * This is the strongest feasibility check available.
1377  * @return whether the current trace (partial or complete) must be a prefix of
1378  * a feasible trace.
1379  */
1380 bool ModelExecution::isfeasibleprefix() const
1381 {
1382         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1383 }
1384
1385 /**
1386  * Print disagnostic information about an infeasible execution
1387  * @param prefix A string to prefix the output with; if NULL, then a default
1388  * message prefix will be provided
1389  */
1390 void ModelExecution::print_infeasibility(const char *prefix) const
1391 {
1392         char buf[100];
1393         char *ptr = buf;
1394         if (mo_graph->checkForCycles())
1395                 ptr += sprintf(ptr, "[mo cycle]");
1396         if (priv->failed_promise || priv->hard_failed_promise)
1397                 ptr += sprintf(ptr, "[failed promise]");
1398         if (priv->too_many_reads)
1399                 ptr += sprintf(ptr, "[too many reads]");
1400         if (priv->no_valid_reads)
1401                 ptr += sprintf(ptr, "[no valid reads-from]");
1402         if (priv->bad_synchronization)
1403                 ptr += sprintf(ptr, "[bad sw ordering]");
1404         if (priv->bad_sc_read)
1405                 ptr += sprintf(ptr, "[bad sc read]");
1406         if (promises_expired())
1407                 ptr += sprintf(ptr, "[promise expired]");
1408         if (promises.size() != 0)
1409                 ptr += sprintf(ptr, "[unresolved promise]");
1410         if (ptr != buf)
1411                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1412 }
1413
1414 /**
1415  * Returns whether the current completed trace is feasible, except for pending
1416  * release sequences.
1417  */
1418 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1419 {
1420         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1421
1422 }
1423
1424 /**
1425  * Check if the current partial trace is infeasible. Does not check any
1426  * end-of-execution flags, which might rule out the execution. Thus, this is
1427  * useful only for ruling an execution as infeasible.
1428  * @return whether the current partial trace is infeasible.
1429  */
1430 bool ModelExecution::is_infeasible() const
1431 {
1432         return mo_graph->checkForCycles() ||
1433                 priv->no_valid_reads ||
1434                 priv->too_many_reads ||
1435                 priv->bad_synchronization ||
1436                 priv->bad_sc_read ||
1437                 priv->hard_failed_promise ||
1438                 promises_expired();
1439 }
1440
1441 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1442 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1443         ModelAction *lastread = get_last_action(act->get_tid());
1444         lastread->process_rmw(act);
1445         if (act->is_rmw()) {
1446                 if (lastread->get_reads_from())
1447                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1448                 else
1449                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1450                 mo_graph->commitChanges();
1451         }
1452         return lastread;
1453 }
1454
1455 /**
1456  * A helper function for ModelExecution::check_recency, to check if the current
1457  * thread is able to read from a different write/promise for 'params.maxreads'
1458  * number of steps and if that write/promise should become visible (i.e., is
1459  * ordered later in the modification order). This helps model memory liveness.
1460  *
1461  * @param curr The current action. Must be a read.
1462  * @param rf The write/promise from which we plan to read
1463  * @param other_rf The write/promise from which we may read
1464  * @return True if we were able to read from other_rf for params.maxreads steps
1465  */
1466 template <typename T, typename U>
1467 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1468 {
1469         /* Need a different write/promise */
1470         if (other_rf->equals(rf))
1471                 return false;
1472
1473         /* Only look for "newer" writes/promises */
1474         if (!mo_graph->checkReachable(rf, other_rf))
1475                 return false;
1476
1477         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1478         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1479         action_list_t::reverse_iterator rit = list->rbegin();
1480         ASSERT((*rit) == curr);
1481         /* Skip past curr */
1482         rit++;
1483
1484         /* Does this write/promise work for everyone? */
1485         for (int i = 0; i < params->maxreads; i++, rit++) {
1486                 ModelAction *act = *rit;
1487                 if (!act->may_read_from(other_rf))
1488                         return false;
1489         }
1490         return true;
1491 }
1492
1493 /**
1494  * Checks whether a thread has read from the same write or Promise for too many
1495  * times without seeing the effects of a later write/Promise.
1496  *
1497  * Basic idea:
1498  * 1) there must a different write/promise that we could read from,
1499  * 2) we must have read from the same write/promise in excess of maxreads times,
1500  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1501  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1502  *
1503  * If so, we decide that the execution is no longer feasible.
1504  *
1505  * @param curr The current action. Must be a read.
1506  * @param rf The ModelAction/Promise from which we might read.
1507  * @return True if the read should succeed; false otherwise
1508  */
1509 template <typename T>
1510 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1511 {
1512         if (!params->maxreads)
1513                 return true;
1514
1515         //NOTE: Next check is just optimization, not really necessary....
1516         if (curr->get_node()->get_read_from_past_size() +
1517                         curr->get_node()->get_read_from_promise_size() <= 1)
1518                 return true;
1519
1520         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1521         int tid = id_to_int(curr->get_tid());
1522         ASSERT(tid < (int)thrd_lists->size());
1523         action_list_t *list = &(*thrd_lists)[tid];
1524         action_list_t::reverse_iterator rit = list->rbegin();
1525         ASSERT((*rit) == curr);
1526         /* Skip past curr */
1527         rit++;
1528
1529         action_list_t::reverse_iterator ritcopy = rit;
1530         /* See if we have enough reads from the same value */
1531         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1532                 if (ritcopy == list->rend())
1533                         return true;
1534                 ModelAction *act = *ritcopy;
1535                 if (!act->is_read())
1536                         return true;
1537                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1538                         return true;
1539                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1540                         return true;
1541                 if (act->get_node()->get_read_from_past_size() +
1542                                 act->get_node()->get_read_from_promise_size() <= 1)
1543                         return true;
1544         }
1545         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1546                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1547                 if (should_read_instead(curr, rf, write))
1548                         return false; /* liveness failure */
1549         }
1550         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1551                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1552                 if (should_read_instead(curr, rf, promise))
1553                         return false; /* liveness failure */
1554         }
1555         return true;
1556 }
1557
1558 /**
1559  * @brief Updates the mo_graph with the constraints imposed from the current
1560  * read.
1561  *
1562  * Basic idea is the following: Go through each other thread and find
1563  * the last action that happened before our read.  Two cases:
1564  *
1565  * -# The action is a write: that write must either occur before
1566  * the write we read from or be the write we read from.
1567  * -# The action is a read: the write that that action read from
1568  * must occur before the write we read from or be the same write.
1569  *
1570  * @param curr The current action. Must be a read.
1571  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1572  * @return True if modification order edges were added; false otherwise
1573  */
1574 template <typename rf_type>
1575 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1576 {
1577         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1578         unsigned int i;
1579         bool added = false;
1580         ASSERT(curr->is_read());
1581
1582         /* Last SC fence in the current thread */
1583         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1584         ModelAction *last_sc_write = NULL;
1585         if (curr->is_seqcst())
1586                 last_sc_write = get_last_seq_cst_write(curr);
1587
1588         /* Iterate over all threads */
1589         for (i = 0; i < thrd_lists->size(); i++) {
1590                 /* Last SC fence in thread i */
1591                 ModelAction *last_sc_fence_thread_local = NULL;
1592                 if (int_to_id((int)i) != curr->get_tid())
1593                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1594
1595                 /* Last SC fence in thread i, before last SC fence in current thread */
1596                 ModelAction *last_sc_fence_thread_before = NULL;
1597                 if (last_sc_fence_local)
1598                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1599
1600                 /* Iterate over actions in thread, starting from most recent */
1601                 action_list_t *list = &(*thrd_lists)[i];
1602                 action_list_t::reverse_iterator rit;
1603                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1604                         ModelAction *act = *rit;
1605
1606                         /* Skip curr */
1607                         if (act == curr)
1608                                 continue;
1609                         /* Don't want to add reflexive edges on 'rf' */
1610                         if (act->equals(rf)) {
1611                                 if (act->happens_before(curr))
1612                                         break;
1613                                 else
1614                                         continue;
1615                         }
1616
1617                         if (act->is_write()) {
1618                                 /* C++, Section 29.3 statement 5 */
1619                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1620                                                 *act < *last_sc_fence_thread_local) {
1621                                         added = mo_graph->addEdge(act, rf) || added;
1622                                         break;
1623                                 }
1624                                 /* C++, Section 29.3 statement 4 */
1625                                 else if (act->is_seqcst() && last_sc_fence_local &&
1626                                                 *act < *last_sc_fence_local) {
1627                                         added = mo_graph->addEdge(act, rf) || added;
1628                                         break;
1629                                 }
1630                                 /* C++, Section 29.3 statement 6 */
1631                                 else if (last_sc_fence_thread_before &&
1632                                                 *act < *last_sc_fence_thread_before) {
1633                                         added = mo_graph->addEdge(act, rf) || added;
1634                                         break;
1635                                 }
1636                         }
1637
1638                         /*
1639                          * Include at most one act per-thread that "happens
1640                          * before" curr
1641                          */
1642                         if (act->happens_before(curr)) {
1643                                 if (act->is_write()) {
1644                                         added = mo_graph->addEdge(act, rf) || added;
1645                                 } else {
1646                                         const ModelAction *prevrf = act->get_reads_from();
1647                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1648                                         if (prevrf) {
1649                                                 if (!prevrf->equals(rf))
1650                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1651                                         } else if (!prevrf_promise->equals(rf)) {
1652                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1653                                         }
1654                                 }
1655                                 break;
1656                         }
1657                 }
1658         }
1659
1660         /*
1661          * All compatible, thread-exclusive promises must be ordered after any
1662          * concrete loads from the same thread
1663          */
1664         for (unsigned int i = 0; i < promises.size(); i++)
1665                 if (promises[i]->is_compatible_exclusive(curr))
1666                         added = mo_graph->addEdge(rf, promises[i]) || added;
1667
1668         return added;
1669 }
1670
1671 /**
1672  * Updates the mo_graph with the constraints imposed from the current write.
1673  *
1674  * Basic idea is the following: Go through each other thread and find
1675  * the lastest action that happened before our write.  Two cases:
1676  *
1677  * (1) The action is a write => that write must occur before
1678  * the current write
1679  *
1680  * (2) The action is a read => the write that that action read from
1681  * must occur before the current write.
1682  *
1683  * This method also handles two other issues:
1684  *
1685  * (I) Sequential Consistency: Making sure that if the current write is
1686  * seq_cst, that it occurs after the previous seq_cst write.
1687  *
1688  * (II) Sending the write back to non-synchronizing reads.
1689  *
1690  * @param curr The current action. Must be a write.
1691  * @param send_fv A vector for stashing reads to which we may pass our future
1692  * value. If NULL, then don't record any future values.
1693  * @return True if modification order edges were added; false otherwise
1694  */
1695 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1696 {
1697         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1698         unsigned int i;
1699         bool added = false;
1700         ASSERT(curr->is_write());
1701
1702         if (curr->is_seqcst()) {
1703                 /* We have to at least see the last sequentially consistent write,
1704                          so we are initialized. */
1705                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1706                 if (last_seq_cst != NULL) {
1707                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1708                 }
1709         }
1710
1711         /* Last SC fence in the current thread */
1712         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1713
1714         /* Iterate over all threads */
1715         for (i = 0; i < thrd_lists->size(); i++) {
1716                 /* Last SC fence in thread i, before last SC fence in current thread */
1717                 ModelAction *last_sc_fence_thread_before = NULL;
1718                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1719                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1720
1721                 /* Iterate over actions in thread, starting from most recent */
1722                 action_list_t *list = &(*thrd_lists)[i];
1723                 action_list_t::reverse_iterator rit;
1724                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1725                         ModelAction *act = *rit;
1726                         if (act == curr) {
1727                                 /*
1728                                  * 1) If RMW and it actually read from something, then we
1729                                  * already have all relevant edges, so just skip to next
1730                                  * thread.
1731                                  *
1732                                  * 2) If RMW and it didn't read from anything, we should
1733                                  * whatever edge we can get to speed up convergence.
1734                                  *
1735                                  * 3) If normal write, we need to look at earlier actions, so
1736                                  * continue processing list.
1737                                  */
1738                                 if (curr->is_rmw()) {
1739                                         if (curr->get_reads_from() != NULL)
1740                                                 break;
1741                                         else
1742                                                 continue;
1743                                 } else
1744                                         continue;
1745                         }
1746
1747                         /* C++, Section 29.3 statement 7 */
1748                         if (last_sc_fence_thread_before && act->is_write() &&
1749                                         *act < *last_sc_fence_thread_before) {
1750                                 added = mo_graph->addEdge(act, curr) || added;
1751                                 break;
1752                         }
1753
1754                         /*
1755                          * Include at most one act per-thread that "happens
1756                          * before" curr
1757                          */
1758                         if (act->happens_before(curr)) {
1759                                 /*
1760                                  * Note: if act is RMW, just add edge:
1761                                  *   act --mo--> curr
1762                                  * The following edge should be handled elsewhere:
1763                                  *   readfrom(act) --mo--> act
1764                                  */
1765                                 if (act->is_write())
1766                                         added = mo_graph->addEdge(act, curr) || added;
1767                                 else if (act->is_read()) {
1768                                         //if previous read accessed a null, just keep going
1769                                         if (act->get_reads_from() == NULL) {
1770                                                 added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
1771                                         } else
1772                                                 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1773                                 }
1774                                 break;
1775                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1776                                                      !act->same_thread(curr)) {
1777                                 /* We have an action that:
1778                                    (1) did not happen before us
1779                                    (2) is a read and we are a write
1780                                    (3) cannot synchronize with us
1781                                    (4) is in a different thread
1782                                    =>
1783                                    that read could potentially read from our write.  Note that
1784                                    these checks are overly conservative at this point, we'll
1785                                    do more checks before actually removing the
1786                                    pendingfuturevalue.
1787
1788                                  */
1789
1790                                 if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
1791                                         if (!is_infeasible())
1792                                                 send_fv->push_back(act);
1793                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1794                                                 add_future_value(curr, act);
1795                                 }
1796                         }
1797                 }
1798         }
1799
1800         /*
1801          * All compatible, thread-exclusive promises must be ordered after any
1802          * concrete stores to the same thread, or else they can be merged with
1803          * this store later
1804          */
1805         for (unsigned int i = 0; i < promises.size(); i++)
1806                 if (promises[i]->is_compatible_exclusive(curr))
1807                         added = mo_graph->addEdge(curr, promises[i]) || added;
1808
1809         return added;
1810 }
1811
//This procedure uses coherence to prune future values that are
//guaranteed to generate a coherence violation.
//
//need to see if there is (1) a promise for thread write, (2)
//the promise is sb before write, (3) the promise can only be
//resolved by the thread read, and (4) the promise has same
//location as read/write

/**
 * @brief Prune future values guaranteed to create a coherence violation
 *
 * Scans the outstanding promises at the same location as @a write; if the
 * writing thread already reads from such a promise earlier in its own
 * execution, and only the reading thread can resolve that promise, then
 * sending @a write back to @a read would violate coherence.
 *
 * @param write The write whose future value we are considering sending
 * @param read The read that might receive the future value
 * @return False if the future value is guaranteed to violate coherence;
 * true otherwise
 */
bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Iterate promises newest-to-oldest */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						/* walk backwards through the node stack from 'read'
						 * towards 'prreader' */
						bool okay=false;
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					return false;
				}
			}
		}
	}
	return true;
}
1854
1855
1856 /** Arbitrary reads from the future are not allowed.  Section 29.3
1857  * part 9 places some constraints.  This method checks one result of constraint
1858  * constraint.  Others require compiler support. */
1859 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1860 {
1861         if (!writer->is_rmw())
1862                 return true;
1863
1864         if (!reader->is_rmw())
1865                 return true;
1866
1867         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1868                 if (search == reader)
1869                         return false;
1870                 if (search->get_tid() == reader->get_tid() &&
1871                                 search->happens_before(reader))
1872                         break;
1873         }
1874
1875         return true;
1876 }
1877
1878 /**
1879  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1880  * some constraints. This method checks one the following constraint (others
1881  * require compiler support):
1882  *
1883  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1884  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1885  */
1886 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1887 {
1888         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1889         unsigned int i;
1890         /* Iterate over all threads */
1891         for (i = 0; i < thrd_lists->size(); i++) {
1892                 const ModelAction *write_after_read = NULL;
1893
1894                 /* Iterate over actions in thread, starting from most recent */
1895                 action_list_t *list = &(*thrd_lists)[i];
1896                 action_list_t::reverse_iterator rit;
1897                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1898                         ModelAction *act = *rit;
1899
1900                         /* Don't disallow due to act == reader */
1901                         if (!reader->happens_before(act) || reader == act)
1902                                 break;
1903                         else if (act->is_write())
1904                                 write_after_read = act;
1905                         else if (act->is_read() && act->get_reads_from() != NULL)
1906                                 write_after_read = act->get_reads_from();
1907                 }
1908
1909                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1910                         return false;
1911         }
1912         return true;
1913 }
1914
1915 /**
1916  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1917  * The ModelAction under consideration is expected to be taking part in
1918  * release/acquire synchronization as an object of the "reads from" relation.
1919  * Note that this can only provide release sequence support for RMW chains
1920  * which do not read from the future, as those actions cannot be traced until
1921  * their "promise" is fulfilled. Similarly, we may not even establish the
1922  * presence of a release sequence with certainty, as some modification order
1923  * constraints may be decided further in the future. Thus, this function
1924  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1925  * and a boolean representing certainty.
1926  *
1927  * @param rf The action that might be part of a release sequence. Must be a
1928  * write.
1929  * @param release_heads A pass-by-reference style return parameter. After
1930  * execution of this function, release_heads will contain the heads of all the
1931  * relevant release sequences, if any exists with certainty
1932  * @param pending A pass-by-reference style return parameter which is only used
1933  * when returning false (i.e., uncertain). Returns most information regarding
1934  * an uncertain release sequence, including any write operations that might
1935  * break the sequence.
1936  * @return true, if the ModelExecution is certain that release_heads is complete;
1937  * false otherwise
1938  */
1939 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1940                 rel_heads_list_t *release_heads,
1941                 struct release_seq *pending) const
1942 {
1943         /* Only check for release sequences if there are no cycles */
1944         if (mo_graph->checkForCycles())
1945                 return false;
1946
1947         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1948                 ASSERT(rf->is_write());
1949
1950                 if (rf->is_release())
1951                         release_heads->push_back(rf);
1952                 else if (rf->get_last_fence_release())
1953                         release_heads->push_back(rf->get_last_fence_release());
1954                 if (!rf->is_rmw())
1955                         break; /* End of RMW chain */
1956
1957                 /** @todo Need to be smarter here...  In the linux lock
1958                  * example, this will run to the beginning of the program for
1959                  * every acquire. */
1960                 /** @todo The way to be smarter here is to keep going until 1
1961                  * thread has a release preceded by an acquire and you've seen
1962                  *       both. */
1963
1964                 /* acq_rel RMW is a sufficient stopping condition */
1965                 if (rf->is_acquire() && rf->is_release())
1966                         return true; /* complete */
1967         };
1968         if (!rf) {
1969                 /* read from future: need to settle this later */
1970                 pending->rf = NULL;
1971                 return false; /* incomplete */
1972         }
1973
1974         if (rf->is_release())
1975                 return true; /* complete */
1976
1977         /* else relaxed write
1978          * - check for fence-release in the same thread (29.8, stmt. 3)
1979          * - check modification order for contiguous subsequence
1980          *   -> rf must be same thread as release */
1981
1982         const ModelAction *fence_release = rf->get_last_fence_release();
1983         /* Synchronize with a fence-release unconditionally; we don't need to
1984          * find any more "contiguous subsequence..." for it */
1985         if (fence_release)
1986                 release_heads->push_back(fence_release);
1987
1988         int tid = id_to_int(rf->get_tid());
1989         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1990         action_list_t *list = &(*thrd_lists)[tid];
1991         action_list_t::const_reverse_iterator rit;
1992
1993         /* Find rf in the thread list */
1994         rit = std::find(list->rbegin(), list->rend(), rf);
1995         ASSERT(rit != list->rend());
1996
1997         /* Find the last {write,fence}-release */
1998         for (; rit != list->rend(); rit++) {
1999                 if (fence_release && *(*rit) < *fence_release)
2000                         break;
2001                 if ((*rit)->is_release())
2002                         break;
2003         }
2004         if (rit == list->rend()) {
2005                 /* No write-release in this thread */
2006                 return true; /* complete */
2007         } else if (fence_release && *(*rit) < *fence_release) {
2008                 /* The fence-release is more recent (and so, "stronger") than
2009                  * the most recent write-release */
2010                 return true; /* complete */
2011         } /* else, need to establish contiguous release sequence */
2012         ModelAction *release = *rit;
2013
2014         ASSERT(rf->same_thread(release));
2015
2016         pending->writes.clear();
2017
2018         bool certain = true;
2019         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2020                 if (id_to_int(rf->get_tid()) == (int)i)
2021                         continue;
2022                 list = &(*thrd_lists)[i];
2023
2024                 /* Can we ensure no future writes from this thread may break
2025                  * the release seq? */
2026                 bool future_ordered = false;
2027
2028                 ModelAction *last = get_last_action(int_to_id(i));
2029                 Thread *th = get_thread(int_to_id(i));
2030                 if ((last && rf->happens_before(last)) ||
2031                                 !is_enabled(th) ||
2032                                 th->is_complete())
2033                         future_ordered = true;
2034
2035                 ASSERT(!th->is_model_thread() || future_ordered);
2036
2037                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2038                         const ModelAction *act = *rit;
2039                         /* Reach synchronization -> this thread is complete */
2040                         if (act->happens_before(release))
2041                                 break;
2042                         if (rf->happens_before(act)) {
2043                                 future_ordered = true;
2044                                 continue;
2045                         }
2046
2047                         /* Only non-RMW writes can break release sequences */
2048                         if (!act->is_write() || act->is_rmw())
2049                                 continue;
2050
2051                         /* Check modification order */
2052                         if (mo_graph->checkReachable(rf, act)) {
2053                                 /* rf --mo--> act */
2054                                 future_ordered = true;
2055                                 continue;
2056                         }
2057                         if (mo_graph->checkReachable(act, release))
2058                                 /* act --mo--> release */
2059                                 break;
2060                         if (mo_graph->checkReachable(release, act) &&
2061                                       mo_graph->checkReachable(act, rf)) {
2062                                 /* release --mo-> act --mo--> rf */
2063                                 return true; /* complete */
2064                         }
2065                         /* act may break release sequence */
2066                         pending->writes.push_back(act);
2067                         certain = false;
2068                 }
2069                 if (!future_ordered)
2070                         certain = false; /* This thread is uncertain */
2071         }
2072
2073         if (certain) {
2074                 release_heads->push_back(release);
2075                 pending->writes.clear();
2076         } else {
2077                 pending->release = release;
2078                 pending->rf = rf;
2079         }
2080         return certain;
2081 }
2082
2083 /**
2084  * An interface for getting the release sequence head(s) with which a
2085  * given ModelAction must synchronize. This function only returns a non-empty
2086  * result when it can locate a release sequence head with certainty. Otherwise,
2087  * it may mark the internal state of the ModelExecution so that it will handle
2088  * the release sequence at a later time, causing @a acquire to update its
2089  * synchronization at some later point in execution.
2090  *
2091  * @param acquire The 'acquire' action that may synchronize with a release
2092  * sequence
2093  * @param read The read action that may read from a release sequence; this may
2094  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2095  * when 'acquire' is a fence-acquire)
2096  * @param release_heads A pass-by-reference return parameter. Will be filled
2097  * with the head(s) of the release sequence(s), if they exists with certainty.
2098  * @see ModelExecution::release_seq_heads
2099  */
2100 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2101                 ModelAction *read, rel_heads_list_t *release_heads)
2102 {
2103         const ModelAction *rf = read->get_reads_from();
2104         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2105         sequence->acquire = acquire;
2106         sequence->read = read;
2107
2108         if (!release_seq_heads(rf, release_heads, sequence)) {
2109                 /* add act to 'lazy checking' list */
2110                 pending_rel_seqs.push_back(sequence);
2111         } else {
2112                 snapshot_free(sequence);
2113         }
2114 }
2115
2116 /**
2117  * @brief Propagate a modified clock vector to actions later in the execution
2118  * order
2119  *
2120  * After an acquire operation lazily completes a release-sequence
2121  * synchronization, we must update all clock vectors for operations later than
2122  * the acquire in the execution order.
2123  *
2124  * @param acquire The ModelAction whose clock vector must be propagated
2125  * @param work The work queue to which we can add work items, if this
2126  * propagation triggers more updates (e.g., to the modification order)
2127  */
2128 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2129 {
2130         /* Re-check all pending release sequences */
2131         work->push_back(CheckRelSeqWorkEntry(NULL));
2132         /* Re-check read-acquire for mo_graph edges */
2133         work->push_back(MOEdgeWorkEntry(acquire));
2134
2135         /* propagate synchronization to later actions */
2136         action_list_t::reverse_iterator rit = action_trace.rbegin();
2137         for (; (*rit) != acquire; rit++) {
2138                 ModelAction *propagate = *rit;
2139                 if (acquire->happens_before(propagate)) {
2140                         synchronize(acquire, propagate);
2141                         /* Re-check 'propagate' for mo_graph edges */
2142                         work->push_back(MOEdgeWorkEntry(propagate));
2143                 }
2144         }
2145 }
2146
2147 /**
2148  * Attempt to resolve all stashed operations that might synchronize with a
2149  * release sequence for a given location. This implements the "lazy" portion of
2150  * determining whether or not a release sequence was contiguous, since not all
2151  * modification order information is present at the time an action occurs.
2152  *
2153  * @param location The location/object that should be checked for release
2154  * sequence resolutions. A NULL value means to check all locations.
2155  * @param work_queue The work queue to which to add work items as they are
2156  * generated
2157  * @return True if any updates occurred (new synchronization, new mo_graph
2158  * edges)
2159  */
2160 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2161 {
2162         bool updated = false;
2163         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2164         while (it != pending_rel_seqs.end()) {
2165                 struct release_seq *pending = *it;
2166                 ModelAction *acquire = pending->acquire;
2167                 const ModelAction *read = pending->read;
2168
2169                 /* Only resolve sequences on the given location, if provided */
2170                 if (location && read->get_location() != location) {
2171                         it++;
2172                         continue;
2173                 }
2174
2175                 const ModelAction *rf = read->get_reads_from();
2176                 rel_heads_list_t release_heads;
2177                 bool complete;
2178                 complete = release_seq_heads(rf, &release_heads, pending);
2179                 for (unsigned int i = 0; i < release_heads.size(); i++)
2180                         if (!acquire->has_synchronized_with(release_heads[i]))
2181                                 if (synchronize(release_heads[i], acquire))
2182                                         updated = true;
2183
2184                 if (updated) {
2185                         /* Propagate the changed clock vector */
2186                         propagate_clockvector(acquire, work_queue);
2187                 }
2188                 if (complete) {
2189                         it = pending_rel_seqs.erase(it);
2190                         snapshot_free(pending);
2191                 } else {
2192                         it++;
2193                 }
2194         }
2195
2196         // If we resolved promises or data races, see if we have realized a data race.
2197         checkDataRaces();
2198
2199         return updated;
2200 }
2201
2202 /**
2203  * Performs various bookkeeping operations for the current ModelAction. For
2204  * instance, adds action to the per-object, per-thread action vector and to the
2205  * action trace list of all thread actions.
2206  *
2207  * @param act is the ModelAction to add.
2208  */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First access to an atomic variable: materialize its UNINIT action so
	 * reads may observe the "uninitialized" value */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	action_trace.push_back(act);
	/* The UNINIT action is placed at the very front of the trace */
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-(location, thread) index */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Per-thread "last action" cache */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Cache the most recent fence-release per thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	/* A wait also operates on a mutex (its value holds the mutex
	 * location), so index it under the mutex location as well */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2255
2256 /**
2257  * @brief Get the last action performed by a particular Thread
2258  * @param tid The thread ID of the Thread in question
2259  * @return The last action in the thread
2260  */
2261 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2262 {
2263         int threadid = id_to_int(tid);
2264         if (threadid < (int)thrd_last_action.size())
2265                 return thrd_last_action[id_to_int(tid)];
2266         else
2267                 return NULL;
2268 }
2269
2270 /**
2271  * @brief Get the last fence release performed by a particular Thread
2272  * @param tid The thread ID of the Thread in question
2273  * @return The last fence release in the thread, if one exists; NULL otherwise
2274  */
2275 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2276 {
2277         int threadid = id_to_int(tid);
2278         if (threadid < (int)thrd_last_fence_release.size())
2279                 return thrd_last_fence_release[id_to_int(tid)];
2280         else
2281                 return NULL;
2282 }
2283
2284 /**
2285  * Gets the last memory_order_seq_cst write (in the total global sequence)
2286  * performed on a particular object (i.e., memory location), not including the
2287  * current action.
2288  * @param curr The current ModelAction; also denotes the object location to
2289  * check
2290  * @return The last seq_cst write
2291  */
2292 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2293 {
2294         void *location = curr->get_location();
2295         action_list_t *list = obj_map.get(location);
2296         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2297         action_list_t::reverse_iterator rit;
2298         for (rit = list->rbegin(); (*rit) != curr; rit++)
2299                 ;
2300         rit++; /* Skip past curr */
2301         for ( ; rit != list->rend(); rit++)
2302                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2303                         return *rit;
2304         return NULL;
2305 }
2306
2307 /**
2308  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2309  * performed in a particular thread, prior to a particular fence.
2310  * @param tid The ID of the thread to check
2311  * @param before_fence The fence from which to begin the search; if NULL, then
2312  * search for the most recent fence in the thread.
2313  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2314  */
2315 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2316 {
2317         /* All fences should have location FENCE_LOCATION */
2318         action_list_t *list = obj_map.get(FENCE_LOCATION);
2319
2320         if (!list)
2321                 return NULL;
2322
2323         action_list_t::reverse_iterator rit = list->rbegin();
2324
2325         if (before_fence) {
2326                 for (; rit != list->rend(); rit++)
2327                         if (*rit == before_fence)
2328                                 break;
2329
2330                 ASSERT(*rit == before_fence);
2331                 rit++;
2332         }
2333
2334         for (; rit != list->rend(); rit++)
2335                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2336                         return *rit;
2337         return NULL;
2338 }
2339
2340 /**
2341  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2342  * location). This function identifies the mutex according to the current
2343  * action, which is presumed to perform on the same mutex.
2344  * @param curr The current ModelAction; also denotes the object location to
2345  * check
2346  * @return The last unlock operation
2347  */
2348 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2349 {
2350         void *location = curr->get_location();
2351         action_list_t *list = obj_map.get(location);
2352         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2353         action_list_t::reverse_iterator rit;
2354         for (rit = list->rbegin(); rit != list->rend(); rit++)
2355                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2356                         return *rit;
2357         return NULL;
2358 }
2359
2360 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2361 {
2362         ModelAction *parent = get_last_action(tid);
2363         if (!parent)
2364                 parent = get_thread(tid)->get_creation();
2365         return parent;
2366 }
2367
2368 /**
2369  * Returns the clock vector for a given thread.
2370  * @param tid The thread whose clock vector we want
2371  * @return Desired clock vector
2372  */
2373 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2374 {
2375         return get_parent_action(tid)->get_cv();
2376 }
2377
2378 /**
2379  * @brief Find the promise (if any) to resolve for the current action and
2380  * remove it from the pending promise vector
2381  * @param curr The current ModelAction. Should be a write.
2382  * @return The Promise to resolve, if any; otherwise NULL
2383  */
2384 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2385 {
2386         for (unsigned int i = 0; i < promises.size(); i++)
2387                 if (curr->get_node()->get_promise(i)) {
2388                         Promise *ret = promises[i];
2389                         promises.erase(promises.begin() + i);
2390                         return ret;
2391                 }
2392         return NULL;
2393 }
2394
2395 /**
2396  * Resolve a Promise with a current write.
2397  * @param write The ModelAction that is fulfilling Promises
2398  * @param promise The Promise to resolve
2399  * @param work The work queue, for adding new fixup work
2400  * @return True if the Promise was successfully resolved; false otherwise
2401  */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
		work_queue_t *work)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Re-point every reader of this promise at the concrete write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		}
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	/* If the mo_graph cannot resolve the promise consistently, record a
	 * hard (unrecoverable) promise failure */
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	return true;
}
2440
2441 /**
2442  * Compute the set of promises that could potentially be satisfied by this
2443  * action. Note that the set computation actually appears in the Node, not in
2444  * ModelExecution.
2445  * @param curr The ModelAction that may satisfy promises
2446  */
2447 void ModelExecution::compute_promises(ModelAction *curr)
2448 {
2449         for (unsigned int i = 0; i < promises.size(); i++) {
2450                 Promise *promise = promises[i];
2451                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2452                         continue;
2453
2454                 bool satisfy = true;
2455                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2456                         const ModelAction *act = promise->get_reader(j);
2457                         if (act->happens_before(curr) ||
2458                                         act->could_synchronize_with(curr)) {
2459                                 satisfy = false;
2460                                 break;
2461                         }
2462                 }
2463                 if (satisfy)
2464                         curr->get_node()->set_promise(i);
2465         }
2466 }
2467
2468 /** Checks promises in response to change in ClockVector Threads. */
2469 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2470 {
2471         for (unsigned int i = 0; i < promises.size(); i++) {
2472                 Promise *promise = promises[i];
2473                 if (!promise->thread_is_available(tid))
2474                         continue;
2475                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2476                         const ModelAction *act = promise->get_reader(j);
2477                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2478                                         merge_cv->synchronized_since(act)) {
2479                                 if (promise->eliminate_thread(tid)) {
2480                                         /* Promise has failed */
2481                                         priv->failed_promise = true;
2482                                         return;
2483                                 }
2484                         }
2485                 }
2486         }
2487 }
2488
2489 void ModelExecution::check_promises_thread_disabled()
2490 {
2491         for (unsigned int i = 0; i < promises.size(); i++) {
2492                 Promise *promise = promises[i];
2493                 if (promise->has_failed()) {
2494                         priv->failed_promise = true;
2495                         return;
2496                 }
2497         }
2498 }
2499
2500 /**
2501  * @brief Checks promises in response to addition to modification order for
2502  * threads.
2503  *
2504  * We test whether threads are still available for satisfying promises after an
2505  * addition to our modification order constraints. Those that are unavailable
2506  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2507  * that promise has failed.
2508  *
2509  * @param act The ModelAction which updated the modification order
2510  * @param is_read_check Should be true if act is a read and we must check for
2511  * updates to the store from which it read (there is a distinction here for
2512  * RMW's, which are both a load and a store)
2513  */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read-check we test the store 'act' read from; otherwise 'act'
	 * itself is the write of interest */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* Check the first promise reader that happens-before 'act'; if
		 * checkPromise flags the promise, it can no longer be satisfied */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
				continue;
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* If 'write' is mo-reachable from the promise, re-check it */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
		}
	}
}
2548
2549 /**
2550  * Compute the set of writes that may break the current pending release
2551  * sequence. This information is extracted from previous release sequence
2552  * calculations.
2553  *
2554  * @param curr The current ModelAction. Must be a release sequence fixup
2555  * action.
2556  */
2557 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2558 {
2559         if (pending_rel_seqs.empty())
2560                 return;
2561
2562         struct release_seq *pending = pending_rel_seqs.back();
2563         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2564                 const ModelAction *write = pending->writes[i];
2565                 curr->get_node()->add_relseq_break(write);
2566         }
2567
2568         /* NULL means don't break the sequence; just synchronize */
2569         curr->get_node()->add_relseq_break(NULL);
2570 }
2571
2572 /**
2573  * Build up an initial set of all past writes that this 'read' action may read
2574  * from, as well as any previously-observed future values that must still be valid.
2575  *
2576  * @param curr is the current ModelAction that we are exploring; it must be a
2577  * 'read' operation.
2578  */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	/* Seq-cst reads are constrained by the last seq-cst write (below) */
	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: tentatively apply the mo_graph
				 * edges, test feasibility, then roll the changes back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2653
2654 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2655 {
2656         for ( ; write != NULL; write = write->get_reads_from()) {
2657                 /* UNINIT actions don't have a Node, and they never sleep */
2658                 if (write->is_uninitialized())
2659                         return true;
2660                 Node *prevnode = write->get_node()->get_parent();
2661
2662                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2663                 if (write->is_release() && thread_sleep)
2664                         return true;
2665                 if (!write->is_rmw())
2666                         return false;
2667         }
2668         return true;
2669 }
2670
2671 /**
2672  * @brief Get an action representing an uninitialized atomic
2673  *
2674  * This function may create a new one or try to retrieve one from the NodeStack
2675  *
2676  * @param curr The current action, which prompts the creation of an UNINIT action
2677  * @return A pointer to the UNINIT ModelAction
2678  */
2679 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2680 {
2681         Node *node = curr->get_node();
2682         ModelAction *act = node->get_uninit_action();
2683         if (!act) {
2684                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2685                 node->set_uninit_action(act);
2686         }
2687         act->create_cv(NULL);
2688         return act;
2689 }
2690
2691 static void print_list(const action_list_t *list)
2692 {
2693         action_list_t::const_iterator it;
2694
2695         model_print("------------------------------------------------------------------------------------\n");
2696         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2697         model_print("------------------------------------------------------------------------------------\n");
2698
2699         unsigned int hash = 0;
2700
2701         for (it = list->begin(); it != list->end(); it++) {
2702                 const ModelAction *act = *it;
2703                 if (act->get_seq_number() > 0)
2704                         act->print();
2705                 hash = hash^(hash<<3)^((*it)->hash());
2706         }
2707         model_print("HASH %u\n", hash);
2708         model_print("------------------------------------------------------------------------------------\n");
2709 }
2710
2711 #if SUPPORT_MOD_ORDER_DUMP
2712 void ModelExecution::dumpGraph(char *filename) const
2713 {
2714         char buffer[200];
2715         sprintf(buffer, "%s.dot", filename);
2716         FILE *file = fopen(buffer, "w");
2717         fprintf(file, "digraph %s {\n", filename);
2718         mo_graph->dumpNodes(file);
2719         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2720
2721         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2722                 ModelAction *act = *it;
2723                 if (act->is_read()) {
2724                         mo_graph->dot_print_node(file, act);
2725                         if (act->get_reads_from())
2726                                 mo_graph->dot_print_edge(file,
2727                                                 act->get_reads_from(),
2728                                                 act,
2729                                                 "label=\"rf\", color=red, weight=2");
2730                         else
2731                                 mo_graph->dot_print_edge(file,
2732                                                 act->get_reads_from_promise(),
2733                                                 act,
2734                                                 "label=\"rf\", color=red");
2735                 }
2736                 if (thread_array[act->get_tid()]) {
2737                         mo_graph->dot_print_edge(file,
2738                                         thread_array[id_to_int(act->get_tid())],
2739                                         act,
2740                                         "label=\"sb\", color=blue, weight=400");
2741                 }
2742
2743                 thread_array[act->get_tid()] = act;
2744         }
2745         fprintf(file, "}\n");
2746         model_free(thread_array);
2747         fclose(file);
2748 }
2749 #endif
2750
2751 /** @brief Prints an execution trace summary. */
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the modification-order graph for this execution to files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Annotate the header with any special termination conditions */
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	print_list(&action_trace);
	model_print("\n");

	/* List promises that were never resolved by a concrete write */
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2786
2787 /**
2788  * Add a Thread to the system for the first time. Should only be called once
2789  * per thread.
2790  * @param t The Thread to add
2791  */
2792 void ModelExecution::add_thread(Thread *t)
2793 {
2794         unsigned int i = id_to_int(t->get_id());
2795         if (i >= thread_map.size())
2796                 thread_map.resize(i + 1);
2797         thread_map[i] = t;
2798         if (!t->is_model_thread())
2799                 scheduler->add_thread(t);
2800 }
2801
2802 /**
2803  * @brief Get a Thread reference by its ID
2804  * @param tid The Thread's ID
2805  * @return A Thread reference
2806  */
2807 Thread * ModelExecution::get_thread(thread_id_t tid) const
2808 {
2809         unsigned int i = id_to_int(tid);
2810         if (i < thread_map.size())
2811                 return thread_map[i];
2812         return NULL;
2813 }
2814
2815 /**
2816  * @brief Get a reference to the Thread in which a ModelAction was executed
2817  * @param act The ModelAction
2818  * @return A Thread reference
2819  */
2820 Thread * ModelExecution::get_thread(const ModelAction *act) const
2821 {
2822         return get_thread(act->get_tid());
2823 }
2824
2825 /**
2826  * @brief Get a Promise's "promise number"
2827  *
2828  * A "promise number" is an index number that is unique to a promise, valid
2829  * only for a specific snapshot of an execution trace. Promises may come and go
2830  * as they are generated and resolved, so an index only retains meaning for the
2831  * current snapshot.
2832  *
2833  * @param promise The Promise to check
2834  * @return The promise index, if the promise still is valid; otherwise -1
2835  */
2836 int ModelExecution::get_promise_number(const Promise *promise) const
2837 {
2838         for (unsigned int i = 0; i < promises.size(); i++)
2839                 if (promises[i] == promise)
2840                         return i;
2841         /* Not found */
2842         return -1;
2843 }
2844
2845 /**
2846  * @brief Check if a Thread is currently enabled
2847  * @param t The Thread to check
2848  * @return True if the Thread is currently enabled
2849  */
bool ModelExecution::is_enabled(Thread *t) const
{
	/* Thin delegation: the Scheduler owns enabled/disabled state */
	return scheduler->is_enabled(t);
}
2854
2855 /**
2856  * @brief Check if a Thread is currently enabled
2857  * @param tid The ID of the Thread to check
2858  * @return True if the Thread is currently enabled
2859  */
bool ModelExecution::is_enabled(thread_id_t tid) const
{
	/* Thin delegation: the Scheduler owns enabled/disabled state */
	return scheduler->is_enabled(tid);
}
2864
2865 /**
2866  * @brief Select the next thread to execute based on the current action
2867  *
2868  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2869  * actions should be followed by the execution of their child thread. In either
2870  * case, the current action should determine the next thread schedule.
2871  *
2872  * @param curr The current action
2873  * @return The next thread to run, if the current action will determine this
2874  * selection; otherwise NULL
2875  */
2876 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2877 {
2878         /* Do not split atomic RMW */
2879         if (curr->is_rmwr())
2880                 return get_thread(curr);
2881         if (curr->is_write()) {
2882 //              std::memory_order order = curr->get_mo(); 
2883 //              switch(order) {
2884 //                      case std::memory_order_relaxed: 
2885 //                              return get_thread(curr);
2886 //                      case std::memory_order_release:
2887 //                              return get_thread(curr);
2888 //                      defalut:
2889 //                              return NULL;
2890 //              }       
2891                 return NULL;
2892         }
2893
2894         /* Follow CREATE with the created thread */
2895         if (curr->get_type() == THREAD_CREATE)
2896                 return curr->get_thread_operand();
2897         return NULL;
2898 }
2899
2900 /** @return True if the execution has taken too many steps */
2901 bool ModelExecution::too_many_steps() const
2902 {
2903         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2904 }
2905
2906 /**
2907  * Takes the next step in the execution, if possible.
2908  * @param curr The current step to take
2909  * @return Returns the next Thread to run, if any; NULL if this execution
2910  * should terminate
2911  */
2912 Thread * ModelExecution::take_step(ModelAction *curr)
2913 {
2914         Thread *curr_thrd = get_thread(curr);
2915         ASSERT(curr_thrd->get_state() == THREAD_READY);
2916
2917         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2918         curr = check_current_action(curr);
2919         ASSERT(curr);
2920
2921         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2922                 scheduler->remove_thread(curr_thrd);
2923
2924         return action_select_next_thread(curr);
2925 }
2926
2927 /**
2928  * Launch end-of-execution release sequence fixups only when
2929  * the execution is otherwise feasible AND there are:
2930  *
2931  * (1) pending release sequences
2932  * (2) pending assertions that could be invalidated by a change
2933  * in clock vectors (i.e., data races)
2934  * (3) no pending promises
2935  */
2936 void ModelExecution::fixup_release_sequences()
2937 {
2938         while (!pending_rel_seqs.empty() &&
2939                         is_feasible_prefix_ignore_relseq() &&
2940                         haveUnrealizedRaces()) {
2941                 model_print("*** WARNING: release sequence fixup action "
2942                                 "(%zu pending release seuqence(s)) ***\n",
2943                                 pending_rel_seqs.size());
2944                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2945                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2946                                 model_thread);
2947                 take_step(fixup);
2948         };
2949 }