1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4
5 #include "model.h"
6 #include "action.h"
7 #include "nodestack.h"
8 #include "schedule.h"
9 #include "snapshot-interface.h"
10 #include "common.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
13 #include "promise.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16
17 #define INITIAL_THREAD_ID       0
18
19 ModelChecker *model;
20
21 /**
22  * Structure for holding small ModelChecker members that should be snapshotted
23  */
24 struct model_snapshot_members {
25         ModelAction *current_action;
26         unsigned int next_thread_id;
27         modelclock_t used_sequence_numbers;
28         Thread *nextThread;
29         ModelAction *next_backtrack;
30 };
31
32 /** @brief Constructor */
33 ModelChecker::ModelChecker(struct model_params params) :
34         /* Initialize default scheduler */
35         params(params),
36         scheduler(new Scheduler()),
37         num_executions(0),
38         num_feasible_executions(0),
39         diverge(NULL),
40         earliest_diverge(NULL),
41         action_trace(new action_list_t()),
42         thread_map(new HashTable<int, Thread *, int>()),
43         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
44         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
45         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
46         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
47         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51         node_stack(new NodeStack()),
52         mo_graph(new CycleGraph()),
53         failed_promise(false),
54         too_many_reads(false),
55         asserted(false),
56         bad_synchronization(false)
57 {
58         /* Allocate the snapshotted members struct on the snapshotting heap */
59         priv = (struct model_snapshot_members *)snapshot_calloc(1, sizeof(*priv));
60         /* First thread created will have id INITIAL_THREAD_ID */
61         priv->next_thread_id = INITIAL_THREAD_ID;
62
63         /* Initialize a model-checker thread, for special ModelActions */
64         model_thread = new Thread(get_next_id());
65         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
66 }
67
68 /** @brief Destructor */
69 ModelChecker::~ModelChecker()
70 {
71         for (unsigned int i = 0; i < get_num_threads(); i++)
72                 delete thread_map->get(i);
73         delete thread_map;
74
75         delete obj_thrd_map;
76         delete obj_map;
77         delete lock_waiters_map;
78         delete condvar_waiters_map;
79         delete action_trace;
80
81         for (unsigned int i = 0; i < promises->size(); i++)
82                 delete (*promises)[i];
83         delete promises;
84
85         delete pending_rel_seqs;
86
87         delete thrd_last_action;
88         delete node_stack;
89         delete scheduler;
90         delete mo_graph;
91
92         snapshot_free(priv);
93 }
94
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
96         action_list_t * tmp=hash->get(ptr);
97         if (tmp==NULL) {
98                 tmp=new action_list_t();
99                 hash->put(ptr, tmp);
100         }
101         return tmp;
102 }
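/*
 * Usage sketch (illustrative note, not part of the original source): callers
 * rely on this helper (and the vector variant below) to lazily create the
 * per-location list on first access, e.g.
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);
 *
 * so call sites never need an explicit "does this location exist yet?" check.
 */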
103
104 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
105         std::vector<action_list_t> * tmp=hash->get(ptr);
106         if (tmp==NULL) {
107                 tmp=new std::vector<action_list_t>();
108                 hash->put(ptr, tmp);
109         }
110         return tmp;
111 }
112
113 /**
114  * Restores user program to initial state and resets all model-checker data
115  * structures.
116  */
117 void ModelChecker::reset_to_initial_state()
118 {
119         DEBUG("+++ Resetting to initial state +++\n");
120         node_stack->reset_execution();
121         failed_promise = false;
122         too_many_reads = false;
123         bad_synchronization = false;
124         reset_asserted();
125         snapshotObject->backTrackBeforeStep(0);
126 }
127
128 /** @return a thread ID for a new Thread */
129 thread_id_t ModelChecker::get_next_id()
130 {
131         return priv->next_thread_id++;
132 }
133
134 /** @return the number of user threads created during this execution */
135 unsigned int ModelChecker::get_num_threads() const
136 {
137         return priv->next_thread_id;
138 }
139
140 /** @return The currently executing Thread. */
141 Thread * ModelChecker::get_current_thread()
142 {
143         return scheduler->get_current_thread();
144 }
145
146 /** @return a sequence number for a new ModelAction */
147 modelclock_t ModelChecker::get_next_seq_num()
148 {
149         return ++priv->used_sequence_numbers;
150 }
151
152 Node * ModelChecker::get_curr_node() {
153         return node_stack->get_head();
154 }
155
156 /**
157  * @brief Choose the next thread to execute.
158  *
159  * This function chooses the next thread that should execute. It can force the
160  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
161  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
162  * The model-checker may have no preference regarding the next thread (i.e.,
163  * when exploring a new execution ordering), in which case this will return
164  * NULL.
165  * @param curr The current ModelAction. This action might guide the choice of
166  * next thread.
167  * @return The next thread to run. If the model-checker has no preference, NULL.
168  */
169 Thread * ModelChecker::get_next_thread(ModelAction *curr)
170 {
171         thread_id_t tid;
172
173         if (curr!=NULL) {
174                 /* Do not split atomic actions. */
175                 if (curr->is_rmwr())
176                         return thread_current();
177                 /* The THREAD_CREATE action points to the created Thread */
178                 else if (curr->get_type() == THREAD_CREATE)
179                         return (Thread *)curr->get_location();
180         }
181
182         /* Have we completed exploring the preselected path? */
183         if (diverge == NULL)
184                 return NULL;
185
186         /* Else, we are trying to replay an execution */
187         ModelAction *next = node_stack->get_next()->get_action();
188
189         if (next == diverge) {
190                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
191                         earliest_diverge=diverge;
192
193                 Node *nextnode = next->get_node();
194                 Node *prevnode = nextnode->get_parent();
195                 scheduler->update_sleep_set(prevnode);
196
197                 /* Reached divergence point */
198                 if (nextnode->increment_misc()) {
199                         /* The next node will try to satisfy a different misc_index value. */
200                         tid = next->get_tid();
201                         node_stack->pop_restofstack(2);
202                 } else if (nextnode->increment_promise()) {
203                         /* The next node will try to satisfy a different set of promises. */
204                         tid = next->get_tid();
205                         node_stack->pop_restofstack(2);
206                 } else if (nextnode->increment_read_from()) {
207                         /* The next node will read from a different value. */
208                         tid = next->get_tid();
209                         node_stack->pop_restofstack(2);
210                 } else if (nextnode->increment_future_value()) {
211                         /* The next node will try to read from a different future value. */
212                         tid = next->get_tid();
213                         node_stack->pop_restofstack(2);
214                 } else if (nextnode->increment_relseq_break()) {
215                         /* The next node will try to resolve a release sequence differently */
216                         tid = next->get_tid();
217                         node_stack->pop_restofstack(2);
218                 } else {
219                         /* Make a different thread execute for next step */
220                         scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
221                         tid = prevnode->get_next_backtrack();
222                         /* Make sure the backtracked thread isn't sleeping. */
223                         node_stack->pop_restofstack(1);
224                         if (diverge==earliest_diverge) {
225                                 earliest_diverge=prevnode->get_action();
226                         }
227                 }
228                 /* The correct sleep set is in the parent node. */
229                 execute_sleep_set();
230
231                 DEBUG("*** Divergence point ***\n");
232
233                 diverge = NULL;
234         } else {
235                 tid = next->get_tid();
236         }
237         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
238         ASSERT(tid != THREAD_ID_T_NONE);
239         return thread_map->get(id_to_int(tid));
240 }
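/*
 * Note (editorial summary, not in the original source): at a divergence
 * point the code above retries alternatives in a fixed order -- misc index,
 * promise set, read-from choice, future value, release-sequence break -- and
 * only when all of those are exhausted does it fall back to running a
 * different thread taken from the parent node's backtracking set.
 */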
241
242 /**
243  * We need to know what the next action of every thread in the sleep
244  * set will be.  This method computes those actions and stores each one
245  * in the corresponding thread object's pending-action slot.
246  */
247
248 void ModelChecker::execute_sleep_set() {
249         for(unsigned int i=0;i<get_num_threads();i++) {
250                 thread_id_t tid=int_to_id(i);
251                 Thread *thr=get_thread(tid);
252                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
253                                  thr->get_pending() == NULL ) {
254                         thr->set_state(THREAD_RUNNING);
255                         scheduler->next_thread(thr);
256                         Thread::swap(&system_context, thr);
257                         priv->current_action->set_sleep_flag();
258                         thr->set_pending(priv->current_action);
259                 }
260         }
261         priv->current_action = NULL;
262 }
263
264 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
265         for(unsigned int i=0;i<get_num_threads();i++) {
266                 thread_id_t tid=int_to_id(i);
267                 Thread *thr=get_thread(tid);
268                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
269                         ModelAction *pending_act=thr->get_pending();
270                         if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
271                                 //Remove this thread from sleep set
272                                 scheduler->remove_sleep(thr);
273                         }
274                 }
275         }
276 }
277
278 /**
279  * Check if we are in a deadlock. Should only be called at the end of an
280  * execution, although it should not give false positives in the middle of an
281  * execution (there should be some ENABLED thread).
282  *
283  * @return True if program is in a deadlock; false otherwise
284  */
285 bool ModelChecker::is_deadlocked() const
286 {
287         bool blocking_threads = false;
288         for (unsigned int i = 0; i < get_num_threads(); i++) {
289                 thread_id_t tid = int_to_id(i);
290                 if (is_enabled(tid))
291                         return false;
292                 Thread *t = get_thread(tid);
293                 if (!t->is_model_thread() && t->get_pending())
294                         blocking_threads = true;
295         }
296         return blocking_threads;
297 }
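/*
 * Illustrative deadlock (comment only; 'a' and 'b' are hypothetical locks):
 *
 *   Thread 1: a.lock(); b.lock();
 *   Thread 2: b.lock(); a.lock();
 *
 * In the interleaving where Thread 1 acquires a and Thread 2 acquires b,
 * both threads end up disabled with pending lock actions, no thread is
 * enabled, and is_deadlocked() returns true.
 */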
298
299 /**
300  * Check if this is a complete execution. That is, have all threads completed
301  * execution (rather than exiting because sleep sets have forced a redundant
302  * execution).
303  *
304  * @return True if the execution is complete.
305  */
306 bool ModelChecker::is_complete_execution() const
307 {
308         for (unsigned int i = 0; i < get_num_threads(); i++)
309                 if (is_enabled(int_to_id(i)))
310                         return false;
311         return true;
312 }
313
314 /**
315  * Queries the model-checker for more executions to explore and, if one
316  * exists, resets the model-checker state to execute a new execution.
317  *
318  * @return If there are more executions to explore, return true. Otherwise,
319  * return false.
320  */
321 bool ModelChecker::next_execution()
322 {
323         DBG();
324
325         num_executions++;
326
327         if (is_deadlocked())
328                 printf("ERROR: DEADLOCK\n");
329         if (isfinalfeasible()) {
330                 printf("Earliest divergence point since last feasible execution:\n");
331                 if (earliest_diverge)
332                         earliest_diverge->print();
333                 else
334                         printf("(Not set)\n");
335
336                 earliest_diverge = NULL;
337                 num_feasible_executions++;
338         }
339
340
341         if (isfinalfeasible() || DBG_ENABLED()) {
342                 checkDataRaces();
343                 print_summary();
344         }
345
346         if ((diverge = get_next_backtrack()) == NULL)
347                 return false;
348
349         if (DBG_ENABLED()) {
350                 printf("Next execution will diverge at:\n");
351                 diverge->print();
352         }
353
354         reset_to_initial_state();
355         return true;
356 }
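/*
 * Hypothetical driver sketch (not part of this file): the harness is
 * expected to re-run the user program until no unexplored backtracking
 * points remain, roughly
 *
 *   do {
 *           run_user_program();              // one interleaving
 *   } while (model->next_execution());       // returns false when done
 *
 * where run_user_program() stands in for however the instrumented program
 * is actually launched.
 */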
357
358 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
359 {
360         switch (act->get_type()) {
361         case ATOMIC_READ:
362         case ATOMIC_WRITE:
363         case ATOMIC_RMW: {
364                 /* linear search: from most recent to oldest */
365                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
366                 action_list_t::reverse_iterator rit;
367                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
368                         ModelAction *prev = *rit;
369                         if (prev->could_synchronize_with(act))
370                                 return prev;
371                 }
372                 break;
373         }
374         case ATOMIC_LOCK:
375         case ATOMIC_TRYLOCK: {
376                 /* linear search: from most recent to oldest */
377                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
378                 action_list_t::reverse_iterator rit;
379                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
380                         ModelAction *prev = *rit;
381                         if (act->is_conflicting_lock(prev))
382                                 return prev;
383                 }
384                 break;
385         }
386         case ATOMIC_UNLOCK: {
387                 /* linear search: from most recent to oldest */
388                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
389                 action_list_t::reverse_iterator rit;
390                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
391                         ModelAction *prev = *rit;
392                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
393                                 return prev;
394                 }
395                 break;
396         }
397         case ATOMIC_WAIT: {
398                 /* linear search: from most recent to oldest */
399                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
400                 action_list_t::reverse_iterator rit;
401                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
402                         ModelAction *prev = *rit;
403                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
404                                 return prev;
405                         if (!act->same_thread(prev)&&prev->is_notify())
406                                 return prev;
407                 }
408                 break;
409         }
410
411         case ATOMIC_NOTIFY_ALL:
412         case ATOMIC_NOTIFY_ONE: {
413                 /* linear search: from most recent to oldest */
414                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
415                 action_list_t::reverse_iterator rit;
416                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
417                         ModelAction *prev = *rit;
418                         if (!act->same_thread(prev)&&prev->is_wait())
419                                 return prev;
420                 }
421                 break;
422         }
423         default:
424                 break;
425         }
426         return NULL;
427 }
428
429 /** This method finds backtracking points against which we should try to
430  * reorder the given ModelAction.
431  *
432  * @param act The ModelAction to find backtracking points for.
433  */
434 void ModelChecker::set_backtracking(ModelAction *act)
435 {
436         Thread *t = get_thread(act);
437         ModelAction * prev = get_last_conflict(act);
438         if (prev == NULL)
439                 return;
440
441         Node * node = prev->get_node()->get_parent();
442
443         int low_tid, high_tid;
444         if (node->is_enabled(t)) {
445                 low_tid = id_to_int(act->get_tid());
446                 high_tid = low_tid+1;
447         } else {
448                 low_tid = 0;
449                 high_tid = get_num_threads();
450         }
451
452         for(int i = low_tid; i < high_tid; i++) {
453                 thread_id_t tid = int_to_id(i);
454
455                 /* Make sure this thread can be enabled here. */
456                 if (i >= node->get_num_threads())
457                         break;
458
459                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
460                 if (node->enabled_status(tid)!=THREAD_ENABLED)
461                         continue;
462
463                 /* Check if this has been explored already */
464                 if (node->has_been_explored(tid))
465                         continue;
466
467                 /* See if fairness allows */
468                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
469                         bool unfair=false;
470                         for(int t=0;t<node->get_num_threads();t++) {
471                                 thread_id_t tother=int_to_id(t);
472                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
473                                         unfair=true;
474                                         break;
475                                 }
476                         }
477                         if (unfair)
478                                 continue;
479                 }
480                 /* Cache the latest backtracking point */
481                 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
482                         priv->next_backtrack = prev;
483
484                 /* If this is a new backtracking point, mark the tree */
485                 if (!node->set_backtrack(tid))
486                         continue;
487                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
488                                         id_to_int(prev->get_tid()),
489                                         id_to_int(t->get_id()));
490                 if (DBG_ENABLED()) {
491                         prev->print();
492                         act->print();
493                 }
494         }
495 }
496
497 /**
498  * Returns last backtracking point. The model checker will explore a different
499  * path for this point in the next execution.
500  * @return The ModelAction at which the next execution should diverge.
501  */
502 ModelAction * ModelChecker::get_next_backtrack()
503 {
504         ModelAction *next = priv->next_backtrack;
505         priv->next_backtrack = NULL;
506         return next;
507 }
508
509 /**
510  * Processes a read or rmw model action.
511  * @param curr is the read model action to process.
512  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
513  * @return True if processing this read updates the mo_graph.
514  */
515 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
516 {
517         uint64_t value = VALUE_NONE;
518         bool updated = false;
519         while (true) {
520                 const ModelAction *reads_from = curr->get_node()->get_read_from();
521                 if (reads_from != NULL) {
522                         mo_graph->startChanges();
523
524                         value = reads_from->get_value();
525                         bool r_status = false;
526
527                         if (!second_part_of_rmw) {
528                                 check_recency(curr, reads_from);
529                                 r_status = r_modification_order(curr, reads_from);
530                         }
531
532
533                         if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
534                                 mo_graph->rollbackChanges();
535                                 too_many_reads = false;
536                                 continue;
537                         }
538
539                         curr->read_from(reads_from);
540                         mo_graph->commitChanges();
541                         mo_check_promises(curr->get_tid(), reads_from);
542
543                         updated |= r_status;
544                 } else if (!second_part_of_rmw) {
545                         /* Read from future value */
546                         value = curr->get_node()->get_future_value();
547                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
548                         curr->read_from(NULL);
549                         Promise *valuepromise = new Promise(curr, value, expiration);
550                         promises->push_back(valuepromise);
551                 }
552                 get_thread(curr)->set_return_value(value);
553                 return updated;
554         }
555 }
556
557 /**
558  * Processes a lock, trylock, unlock, wait, or notify model action.
559  * @param curr is the mutex-related model action to process.
560  *
561  * The trylock operation checks whether the lock is taken.  If not,
562  * it falls through to the normal lock case.  If so, it returns
563  * failure.
564  *
565  * The lock operation has already been checked to be enabled, so
566  * it just grabs the lock and synchronizes with the previous unlock.
567  *
568  * The unlock operation has to re-enable all of the threads that are
569  * waiting on the lock.
570  *
571  * @return True if synchronization was updated; false otherwise
572  */
573 bool ModelChecker::process_mutex(ModelAction *curr) {
574         std::mutex *mutex=NULL;
575         struct std::mutex_state *state=NULL;
576
577         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
578                 mutex = (std::mutex *)curr->get_location();
579                 state = mutex->get_state();
580         } else if(curr->is_wait()) {
581                 mutex = (std::mutex *)curr->get_value();
582                 state = mutex->get_state();
583         }
584
585         switch (curr->get_type()) {
586         case ATOMIC_TRYLOCK: {
587                 bool success = !state->islocked;
588                 curr->set_try_lock(success);
589                 if (!success) {
590                         get_thread(curr)->set_return_value(0);
591                         break;
592                 }
593                 get_thread(curr)->set_return_value(1);
594         }
595                 //otherwise fall through to the lock case
596         case ATOMIC_LOCK: {
597                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
598                         printf("Lock access before initialization\n");
599                         set_assert();
600                 }
601                 state->islocked = true;
602                 ModelAction *unlock = get_last_unlock(curr);
603                 //synchronize with the previous unlock statement
604                 if (unlock != NULL) {
605                         curr->synchronize_with(unlock);
606                         return true;
607                 }
608                 break;
609         }
610         case ATOMIC_UNLOCK: {
611                 //unlock the lock
612                 state->islocked = false;
613                 //wake up the other threads
614                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
615                 //activate all the waiting threads
616                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
617                         scheduler->wake(get_thread(*rit));
618                 }
619                 waiters->clear();
620                 break;
621         }
622         case ATOMIC_WAIT: {
623                 //unlock the lock
624                 state->islocked = false;
625                 //wake up the other threads
626                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
627                 //activate all the waiting threads
628                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
629                         scheduler->wake(get_thread(*rit));
630                 }
631                 waiters->clear();
632                 //check whether we should go to sleep or not...simulate spurious failures
633                 if (curr->get_node()->get_misc()==0) {
634                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
635                         //disable us
636                         scheduler->sleep(get_current_thread());
637                 }
638                 break;
639         }
640         case ATOMIC_NOTIFY_ALL: {
641                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
642                 //activate all the waiting threads
643                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
644                         scheduler->wake(get_thread(*rit));
645                 }
646                 waiters->clear();
647                 break;
648         }
649         case ATOMIC_NOTIFY_ONE: {
650                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
651                 int wakeupthread=curr->get_node()->get_misc();
652                 action_list_t::iterator it = waiters->begin();
653                 advance(it, wakeupthread);
654                 scheduler->wake(get_thread(*it));
655                 waiters->erase(it);
656                 break;
657         }
658
659         default:
660                 ASSERT(0);
661         }
662         return false;
663 }
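/*
 * Illustrative mapping (assumption about the interposed mutex API): user
 * code such as
 *
 *   std::mutex m;
 *   m.lock();       // arrives here as an ATOMIC_LOCK action
 *   m.try_lock();   // arrives here as an ATOMIC_TRYLOCK action
 *   m.unlock();     // arrives here as an ATOMIC_UNLOCK action
 *
 * reaches process_mutex() as the corresponding ModelAction; the mutex_state
 * consulted above is the model checker's shadow state for the lock, not an
 * operating-system mutex.
 */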
664
665 /**
666  * Process a write ModelAction
667  * @param curr The ModelAction to process
668  * @return True if the mo_graph was updated or promises were resolved
669  */
670 bool ModelChecker::process_write(ModelAction *curr)
671 {
672         bool updated_mod_order = w_modification_order(curr);
673         bool updated_promises = resolve_promises(curr);
674
675         if (promises->size() == 0) {
676                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
677                         struct PendingFutureValue pfv = (*futurevalues)[i];
678                         //Do more ambitious checks now that mo is more complete
679                         if (mo_may_allow(pfv.writer, pfv.act)&&
680                                         pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
681                                         (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
682                                 priv->next_backtrack = pfv.act;
683                 }
684                 futurevalues->resize(0);
685         }
686
687         mo_graph->commitChanges();
688         mo_check_promises(curr->get_tid(), curr);
689
690         get_thread(curr)->set_return_value(VALUE_NONE);
691         return updated_mod_order || updated_promises;
692 }
693
694 /**
695  * @brief Process the current action for thread-related activity
696  *
697  * Performs current-action processing for a THREAD_* ModelAction. Processing
698  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
699  * synchronization, etc.  This function is a no-op for non-THREAD actions
700  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
701  *
702  * @param curr The current action
703  * @return True if synchronization was updated or a thread completed
704  */
705 bool ModelChecker::process_thread_action(ModelAction *curr)
706 {
707         bool updated = false;
708
709         switch (curr->get_type()) {
710         case THREAD_CREATE: {
711                 Thread *th = (Thread *)curr->get_location();
712                 th->set_creation(curr);
713                 break;
714         }
715         case THREAD_JOIN: {
716                 Thread *blocking = (Thread *)curr->get_location();
717                 ModelAction *act = get_last_action(blocking->get_id());
718                 curr->synchronize_with(act);
719                 updated = true; /* trigger rel-seq checks */
720                 break;
721         }
722         case THREAD_FINISH: {
723                 Thread *th = get_thread(curr);
724                 while (!th->wait_list_empty()) {
725                         ModelAction *act = th->pop_wait_list();
726                         scheduler->wake(get_thread(act));
727                 }
728                 th->complete();
729                 updated = true; /* trigger rel-seq checks */
730                 break;
731         }
732         case THREAD_START: {
733                 check_promises(curr->get_tid(), NULL, curr->get_cv());
734                 break;
735         }
736         default:
737                 break;
738         }
739
740         return updated;
741 }
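/*
 * Illustrative mapping (assumption about the threading wrappers): a user
 * call along the lines of thrd_create(&t, func, arg) is expected to arrive
 * here as a THREAD_CREATE action whose location points at the new Thread
 * object, and thrd_join(t) as a THREAD_JOIN action that synchronizes with
 * the joined thread's final action, as handled above.
 */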
742
743 /**
744  * @brief Process the current action for release sequence fixup activity
745  *
746  * Performs model-checker release sequence fixups for the current action,
747  * forcing a single pending release sequence to break (with a given, potential
748  * "loose" write) or to complete (i.e., synchronize). If a pending release
749  * sequence forms a complete release sequence, then we must perform the fixup
750  * synchronization, mo_graph additions, etc.
751  *
752  * @param curr The current action; must be a release sequence fixup action
753  * @param work_queue The work queue to which to add work items as they are
754  * generated
755  */
756 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
757 {
758         const ModelAction *write = curr->get_node()->get_relseq_break();
759         struct release_seq *sequence = pending_rel_seqs->back();
760         pending_rel_seqs->pop_back();
761         ASSERT(sequence);
762         ModelAction *acquire = sequence->acquire;
763         const ModelAction *rf = sequence->rf;
764         const ModelAction *release = sequence->release;
765         ASSERT(acquire);
766         ASSERT(release);
767         ASSERT(rf);
768         ASSERT(release->same_thread(rf));
769
770         if (write == NULL) {
771                 /**
772                  * @todo Forcing a synchronization requires that we set
773                  * modification order constraints. For instance, we can't allow
774                  * a fixup sequence in which two separate read-acquire
775                  * operations read from the same sequence, where the first one
776                  * synchronizes and the other doesn't. Essentially, we can't
777                  * allow any writes to insert themselves between 'release' and
778                  * 'rf'
779                  */
780
781                 /* Must synchronize */
782                 if (!acquire->synchronize_with(release)) {
783                         set_bad_synchronization();
784                         return;
785                 }
786                 /* Re-check all pending release sequences */
787                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
788                 /* Re-check act for mo_graph edges */
789                 work_queue->push_back(MOEdgeWorkEntry(acquire));
790
791                 /* propagate synchronization to later actions */
792                 action_list_t::reverse_iterator rit = action_trace->rbegin();
793                 for (; (*rit) != acquire; rit++) {
794                         ModelAction *propagate = *rit;
795                         if (acquire->happens_before(propagate)) {
796                                 propagate->synchronize_with(acquire);
797                                 /* Re-check 'propagate' for mo_graph edges */
798                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
799                         }
800                 }
801         } else {
802                 /* Break release sequence with new edges:
803                  *   release --mo--> write --mo--> rf */
804                 mo_graph->addEdge(release, write);
805                 mo_graph->addEdge(write, rf);
806         }
807
808         /* See if we have realized a data race */
809         if (checkDataRaces())
810                 set_assert();
811 }
812
813 /**
814  * Initialize the current action by performing one or more of the following
815  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
816  * in the NodeStack, manipulating backtracking sets, allocating and
817  * initializing clock vectors, and computing the promises to fulfill.
818  *
819  * @param curr The current action, as passed from the user context; may be
820  * freed/invalidated after the execution of this function, with a different
821  * action "returned" its place (pass-by-reference)
822  * action "returned" in its place (pass-by-reference)
823  */
824 bool ModelChecker::initialize_curr_action(ModelAction **curr)
825 {
826         ModelAction *newcurr;
827
828         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
829                 newcurr = process_rmw(*curr);
830                 delete *curr;
831
832                 if (newcurr->is_rmw())
833                         compute_promises(newcurr);
834
835                 *curr = newcurr;
836                 return false;
837         }
838
839         (*curr)->set_seq_number(get_next_seq_num());
840
841         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
842         if (newcurr) {
843                 /* First restore type and order in case of RMW operation */
844                 if ((*curr)->is_rmwr())
845                         newcurr->copy_typeandorder(*curr);
846
847                 ASSERT((*curr)->get_location() == newcurr->get_location());
848                 newcurr->copy_from_new(*curr);
849
850                 /* Discard duplicate ModelAction; use action from NodeStack */
851                 delete *curr;
852
853                 /* Always compute new clock vector */
854                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
855
856                 *curr = newcurr;
857                 return false; /* Action was explored previously */
858         } else {
859                 newcurr = *curr;
860
861                 /* Always compute new clock vector */
862                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
863                 /*
864                  * Perform one-time actions when pushing new ModelAction onto
865                  * NodeStack
866                  */
867                 if (newcurr->is_write())
868                         compute_promises(newcurr);
869                 else if (newcurr->is_relseq_fixup())
870                         compute_relseq_breakwrites(newcurr);
871                 else if (newcurr->is_wait())
872                         newcurr->get_node()->set_misc_max(2);
873                 else if (newcurr->is_notify_one()) {
874                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
875                 }
876                 return true; /* This was a new ModelAction */
877         }
878 }
879
880 /**
881  * @brief Check whether a model action is enabled.
882  *
883  * Checks whether a lock or join operation would succeed (i.e., that the lock
884  * is not already held, or that the joined thread has already completed). If it
885  * would block, put the action on a waiter list.
886  *
887  * @param curr is the ModelAction to check whether it is enabled.
888  * @return a bool that indicates whether the action is enabled.
889  */
890 bool ModelChecker::check_action_enabled(ModelAction *curr) {
891         if (curr->is_lock()) {
892                 std::mutex * lock = (std::mutex *)curr->get_location();
893                 struct std::mutex_state * state = lock->get_state();
894                 if (state->islocked) {
895                         //Stick the action in the appropriate waiting queue
896                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
897                         return false;
898                 }
899         } else if (curr->get_type() == THREAD_JOIN) {
900                 Thread *blocking = (Thread *)curr->get_location();
901                 if (!blocking->is_complete()) {
902                         blocking->push_wait_list(curr);
903                         return false;
904                 }
905         }
906
907         return true;
908 }
909
910 /**
911  * Stores the ModelAction for the current thread action.  Call this
912  * immediately before switching from user- to system-context to pass
913  * data between them.
914  * @param act The ModelAction created by the user-thread action
915  */
916 void ModelChecker::set_current_action(ModelAction *act) {
917         priv->current_action = act;
918 }
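/*
 * Typical hand-off sketch (illustrative; the context-switching code lives
 * elsewhere): a user-context wrapper is expected to do roughly
 *
 *   ModelAction *act = new ModelAction(...);   // hypothetical construction
 *   model->set_current_action(act);
 *   // ...swap to the system context, which reads priv->current_action and
 *   // feeds it to check_current_action()...
 *
 * so this setter is simply the mailbox between the two contexts.
 */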
919
920 /**
921  * This is the heart of the model checker routine. It performs model-checking
922  * actions corresponding to a given "current action." Among other tasks, it
923  * calculates reads-from relationships, updates synchronization clock vectors,
924  * forms a memory_order constraints graph, and handles replay/backtrack
925  * execution when running permutations of previously-observed executions.
926  *
927  * @param curr The current action to process
928  * @return The next Thread that must be executed. May be NULL if ModelChecker
929  * makes no choice (e.g., according to replay execution, combining RMW actions,
930  * etc.)
931  */
932 Thread * ModelChecker::check_current_action(ModelAction *curr)
933 {
934         ASSERT(curr);
935         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
936
937         if (!check_action_enabled(curr)) {
938                 /* Make the execution look like we chose to run this action
939                  * much later, when a lock/join can succeed */
940                 get_current_thread()->set_pending(curr);
941                 scheduler->sleep(get_current_thread());
942                 return get_next_thread(NULL);
943         }
944
945         bool newly_explored = initialize_curr_action(&curr);
946
947         wake_up_sleeping_actions(curr);
948
949         /* Add the action to lists before any other model-checking tasks */
950         if (!second_part_of_rmw)
951                 add_action_to_lists(curr);
952
953         /* Build may_read_from set for newly-created actions */
954         if (newly_explored && curr->is_read())
955                 build_reads_from_past(curr);
956
957         /* Initialize work_queue with the "current action" work */
958         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
959         while (!work_queue.empty()) {
960                 WorkQueueEntry work = work_queue.front();
961                 work_queue.pop_front();
962
963                 switch (work.type) {
964                 case WORK_CHECK_CURR_ACTION: {
965                         ModelAction *act = work.action;
966                         bool update = false; /* update this location's release seq's */
967                         bool update_all = false; /* update all release seq's */
968
969                         if (process_thread_action(curr))
970                                 update_all = true;
971
972                         if (act->is_read() && process_read(act, second_part_of_rmw))
973                                 update = true;
974
975                         if (act->is_write() && process_write(act))
976                                 update = true;
977
978                         if (act->is_mutex_op() && process_mutex(act))
979                                 update_all = true;
980
981                         if (act->is_relseq_fixup())
982                                 process_relseq_fixup(curr, &work_queue);
983
984                         if (update_all)
985                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
986                         else if (update)
987                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
988                         break;
989                 }
990                 case WORK_CHECK_RELEASE_SEQ:
991                         resolve_release_sequences(work.location, &work_queue);
992                         break;
993                 case WORK_CHECK_MO_EDGES: {
994                         /** @todo Complete verification of work_queue */
995                         ModelAction *act = work.action;
996                         bool updated = false;
997
998                         if (act->is_read()) {
999                                 const ModelAction *rf = act->get_reads_from();
1000                                 if (rf != NULL && r_modification_order(act, rf))
1001                                         updated = true;
1002                         }
1003                         if (act->is_write()) {
1004                                 if (w_modification_order(act))
1005                                         updated = true;
1006                         }
1007                         mo_graph->commitChanges();
1008
1009                         if (updated)
1010                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1011                         break;
1012                 }
1013                 default:
1014                         ASSERT(false);
1015                         break;
1016                 }
1017         }
1018
1019         check_curr_backtracking(curr);
1020         set_backtracking(curr);
1021         return get_next_thread(curr);
1022 }
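/*
 * Note (editorial summary, not in the original source): the work queue above
 * runs to a fixed point -- processing the current action may enqueue
 * release-sequence checks, resolving those may add synchronization, and new
 * synchronization may in turn enqueue further mo_graph edge checks -- before
 * backtracking information is updated and the next thread is chosen.
 */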
1023
1024 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1025         Node *currnode = curr->get_node();
1026         Node *parnode = currnode->get_parent();
1027
1028         if ((!parnode->backtrack_empty() ||
1029                          !currnode->misc_empty() ||
1030                          !currnode->read_from_empty() ||
1031                          !currnode->future_value_empty() ||
1032                          !currnode->promise_empty() ||
1033                          !currnode->relseq_break_empty())
1034                         && (!priv->next_backtrack ||
1035                                         *curr > *priv->next_backtrack)) {
1036                 priv->next_backtrack = curr;
1037         }
1038 }
1039
1040 bool ModelChecker::promises_expired() const
1041 {
1042         for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1043                 Promise *promise = (*promises)[promise_index];
1044                 if (promise->get_expiration()<priv->used_sequence_numbers) {
1045                         return true;
1046                 }
1047         }
1048         return false;
1049 }
1050
1051 /** @return whether the current partial trace must be a prefix of a
1052  * feasible trace. */
1053 bool ModelChecker::isfeasibleprefix() const
1054 {
1055         return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1056 }
1057
1058 /** @return whether the current partial trace is feasible. */
1059 bool ModelChecker::isfeasible() const
1060 {
1061         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1062                 DEBUG("Infeasible: RMW violation\n");
1063
1064         return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1065 }
1066
1067 /** @return whether the current partial trace is feasible other than
1068  * multiple RMWs reading from the same store. */
1069 bool ModelChecker::isfeasibleotherthanRMW() const
1070 {
1071         if (DBG_ENABLED()) {
1072                 if (mo_graph->checkForCycles())
1073                         DEBUG("Infeasible: modification order cycles\n");
1074                 if (failed_promise)
1075                         DEBUG("Infeasible: failed promise\n");
1076                 if (too_many_reads)
1077                         DEBUG("Infeasible: too many reads\n");
1078                 if (bad_synchronization)
1079                         DEBUG("Infeasible: bad synchronization ordering\n");
1080                 if (promises_expired())
1081                         DEBUG("Infeasible: promises expired\n");
1082         }
1083         return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1084 }
1085
1086 /** Returns whether the current completed trace is feasible. */
1087 bool ModelChecker::isfinalfeasible() const
1088 {
1089         if (DBG_ENABLED() && promises->size() != 0)
1090                 DEBUG("Infeasible: unresolved promises\n");
1091
1092         return isfeasible() && promises->size() == 0;
1093 }
1094
1095 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1096 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1097         ModelAction *lastread = get_last_action(act->get_tid());
1098         lastread->process_rmw(act);
1099         if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1100                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1101                 mo_graph->commitChanges();
1102         }
1103         return lastread;
1104 }
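/*
 * Illustrative decomposition (assumption about how RMWs are split): an
 * atomic operation such as x.compare_exchange_strong(expected, desired) is
 * modeled as a read part (RMWR) followed by either a write part (RMW) on
 * success or a read-only completion (RMWC) on failure. process_rmw() merges
 * that second part back into the earlier RMWR action and, for a successful
 * RMW, records the atomicity edge in the mo_graph.
 */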
1105
1106 /**
1107  * Checks whether a thread has read from the same write too many times
1108  * without seeing the effects of a later write.
1109  *
1110  * Basic idea:
1111  * 1) there must be a different write that we could read from that would satisfy the modification order,
1112  * 2) we must have read from the same value in excess of maxreads times, and
1113  * 3) that other write must have been in the reads_from set for maxreads times.
1114  *
1115  * If so, we decide that the execution is no longer feasible.
1116  */
1117 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
1118         if (params.maxreads != 0) {
1119
1120                 if (curr->get_node()->get_read_from_size() <= 1)
1121                         return;
1122                 //Must make sure that execution is currently feasible...  We could
1123                 //accidentally clear by rolling back
1124                 if (!isfeasible())
1125                         return;
1126                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1127                 int tid = id_to_int(curr->get_tid());
1128
1129                 /* Skip checks */
1130                 if ((int)thrd_lists->size() <= tid)
1131                         return;
1132                 action_list_t *list = &(*thrd_lists)[tid];
1133
1134                 action_list_t::reverse_iterator rit = list->rbegin();
1135                 /* Skip past curr */
1136                 for (; (*rit) != curr; rit++)
1137                         ;
1138                 /* go past curr now */
1139                 rit++;
1140
1141                 action_list_t::reverse_iterator ritcopy = rit;
1142                 //See if we have enough reads from the same value
1143                 int count = 0;
1144                 for (; count < params.maxreads; rit++,count++) {
1145                         if (rit==list->rend())
1146                                 return;
1147                         ModelAction *act = *rit;
1148                         if (!act->is_read())
1149                                 return;
1150
1151                         if (act->get_reads_from() != rf)
1152                                 return;
1153                         if (act->get_node()->get_read_from_size() <= 1)
1154                                 return;
1155                 }
1156                 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1157                         //Get write
1158                         const ModelAction * write = curr->get_node()->get_read_from_at(i);
1159
1160                         //Need a different write
1161                         if (write==rf)
1162                                 continue;
1163
1164                         /* Test to see whether this is a feasible write to read from */
1165                         mo_graph->startChanges();
1166                         r_modification_order(curr, write);
1167                         bool feasiblereadfrom = isfeasible();
1168                         mo_graph->rollbackChanges();
1169
1170                         if (!feasiblereadfrom)
1171                                 continue;
1172                         rit = ritcopy;
1173
1174                         bool feasiblewrite = true;
1175                 //now we need to see if this write works for everyone
1176
1177                         for (int loop = count; loop>0; loop--,rit++) {
1178                                 ModelAction *act=*rit;
1179                                 bool foundvalue = false;
1180                                 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1181                                         if (act->get_node()->get_read_from_at(j)==write) {
1182                                                 foundvalue = true;
1183                                                 break;
1184                                         }
1185                                 }
1186                                 if (!foundvalue) {
1187                                         feasiblewrite = false;
1188                                         break;
1189                                 }
1190                         }
1191                         if (feasiblewrite) {
1192                                 too_many_reads = true;
1193                                 return;
1194                         }
1195                 }
1196         }
1197 }
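/*
 * Illustrative trigger (comment only): a relaxed spin loop such as
 *
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;   // another thread eventually stores true
 *
 * can keep reading the initial store forever. Once the same thread has read
 * the same write params.maxreads times, and each of those reads could also
 * have fed from some other feasible write, the execution is marked
 * too_many_reads and abandoned rather than unrolled indefinitely.
 */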
1198
1199 /**
1200  * Updates the mo_graph with the constraints imposed from the current
1201  * read.
1202  *
1203  * Basic idea is the following: Go through each other thread and find
1204  * the latest action that happened before our read.  Two cases:
1205  *
1206  * (1) The action is a write => that write must either occur before
1207  * the write we read from or be the write we read from.
1208  *
1209  * (2) The action is a read => the write that that action read from
1210  * must occur before the write we read from or be the same write.
1211  *
1212  * @param curr The current action. Must be a read.
1213  * @param rf The action that curr reads from. Must be a write.
1214  * @return True if modification order edges were added; false otherwise
1215  */
1216 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1217 {
1218         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1219         unsigned int i;
1220         bool added = false;
1221         ASSERT(curr->is_read());
1222
1223         /* Iterate over all threads */
1224         for (i = 0; i < thrd_lists->size(); i++) {
1225                 /* Iterate over actions in thread, starting from most recent */
1226                 action_list_t *list = &(*thrd_lists)[i];
1227                 action_list_t::reverse_iterator rit;
1228                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1229                         ModelAction *act = *rit;
1230
1231                         /*
1232                          * Include at most one act per-thread that "happens
1233                          * before" curr. Don't consider reflexively.
1234                          */
1235                         if (act->happens_before(curr) && act != curr) {
1236                                 if (act->is_write()) {
1237                                         if (rf != act) {
1238                                                 mo_graph->addEdge(act, rf);
1239                                                 added = true;
1240                                         }
1241                                 } else {
1242                                         const ModelAction *prevreadfrom = act->get_reads_from();
1243                                         //if the previous read is unresolved, keep going...
1244                                         if (prevreadfrom == NULL)
1245                                                 continue;
1246
1247                                         if (rf != prevreadfrom) {
1248                                                 mo_graph->addEdge(prevreadfrom, rf);
1249                                                 added = true;
1250                                         }
1251                                 }
1252                                 break;
1253                         }
1254                 }
1255         }
1256
1257         return added;
1258 }
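/*
 * Worked example (comment only): suppose Thread 1 performs W1: x = 1 and,
 * later in happens-before order, Thread 2 reads x (curr) from some other
 * write rf. Case (1) above then adds the edge W1 --mo--> rf. If instead
 * Thread 1's latest action before curr were a read that itself read from
 * W0, case (2) adds W0 --mo--> rf.
 */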
1259
1260 /** This method fixes up the modification order when we resolve a
1261  *  promise.  The basic problem is that actions that occur after the
1262  *  read curr could not properly add items to the modification order
1263  *  for our read.
1264  *
1265  *  So for each thread, we find the earliest item that happens after
1266  *  the read curr.  This is the item we have to fix up with additional
1267  *  constraints.  If that action is a write, we add an MO edge between
1268  *  the action rf and that action.  If the action is a read, we add an
1269  *  MO edge between the action rf and whatever write the read accessed.
1270  *
1271  * @param curr is the read ModelAction that we are fixing up MO edges for.
1272  * @param rf is the write ModelAction that curr reads from.
1273  *
1274  */
1275 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1276 {
1277         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1278         unsigned int i;
1279         ASSERT(curr->is_read());
1280
1281         /* Iterate over all threads */
1282         for (i = 0; i < thrd_lists->size(); i++) {
1283                 /* Iterate over actions in thread, starting from most recent */
1284                 action_list_t *list = &(*thrd_lists)[i];
1285                 action_list_t::reverse_iterator rit;
1286                 ModelAction *lastact = NULL;
1287
1288                 /* Find last action that happens after curr that is either not curr or a rmw */
1289                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1290                         ModelAction *act = *rit;
1291                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1292                                 lastact = act;
1293                         } else
1294                                 break;
1295                 }
1296
1297                 /* Include at most one act per thread that "happens after" curr */
1298                 if (lastact != NULL) {
1299                         if (lastact==curr) {
1300                                 //Case 1: The resolved read is an RMW, and we need to make sure
1301                                 //that the write portion of the RMW is mod ordered after rf
1302
1303                                 mo_graph->addEdge(rf, lastact);
1304                         } else if (lastact->is_read()) {
1305                                 //Case 2: The resolved read is a normal read and the next
1306                                 //operation is a read, and we need to make sure the write it
1307                                 //read from is mod ordered after rf
1308
1309                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1310                                 if (postreadfrom != NULL&&rf != postreadfrom)
1311                                         mo_graph->addEdge(rf, postreadfrom);
1312                         } else {
1313                                 //Case 3: The resolved read is a normal read and the next
1314                                 //operation is a write, and we need to make sure that the
1315                                 //write is mod ordered after rf
1316                                 if (lastact!=rf)
1317                                         mo_graph->addEdge(rf, lastact);
1318                         }
1319                         break;
1320                 }
1321         }
1322 }
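
/*
 * Illustrative sketch (hypothetical client code): suppose Thread 1's
 * relaxed load is speculatively allowed to return 1 before Thread 2's
 * store has been explored (a promise).  When the store is found and the
 * promise is resolved, Thread 1's subsequent store of 2 must be ordered
 * after the store of 1 in x's modification order; that is the edge this
 * fixup adds.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: int r = x.load(std::memory_order_relaxed);  // reads 1 via a promise
 *   //           x.store(2, std::memory_order_relaxed);      // must be mo-after the store of 1
 *   // Thread 2: x.store(1, std::memory_order_relaxed);
 */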
1323
1324 /**
1325  * Updates the mo_graph with the constraints imposed from the current write.
1326  *
1327  * The basic idea is the following: go through each thread and find the
1328  * latest action that happened before our write.  Two cases:
1329  *
1330  * (1) The action is a write => that write must occur before
1331  * the current write.
1332  *
1333  * (2) The action is a read => the write that that action read from
1334  * must occur before the current write.
1335  *
1336  * This method also handles two other issues:
1337  *
1338  * (I) Sequential Consistency: Making sure that if the current write is
1339  * seq_cst, that it occurs after the previous seq_cst write.
1340  *
1341  * (II) Sending the write back to non-synchronizing reads.
1342  *
1343  * @param curr The current action. Must be a write.
1344  * @return True if modification order edges were added; false otherwise
1345  */
1346 bool ModelChecker::w_modification_order(ModelAction *curr)
1347 {
1348         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1349         unsigned int i;
1350         bool added = false;
1351         ASSERT(curr->is_write());
1352
1353         if (curr->is_seqcst()) {
1354                 /* A seq_cst write must be ordered after the last seq_cst
1355                  * write to the same location. */
1356                 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1357                 if (last_seq_cst != NULL) {
1358                         mo_graph->addEdge(last_seq_cst, curr);
1359                         added = true;
1360                 }
1361         }
1362
1363         /* Iterate over all threads */
1364         for (i = 0; i < thrd_lists->size(); i++) {
1365                 /* Iterate over actions in thread, starting from most recent */
1366                 action_list_t *list = &(*thrd_lists)[i];
1367                 action_list_t::reverse_iterator rit;
1368                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1369                         ModelAction *act = *rit;
1370                         if (act == curr) {
1371                                 /*
1372                                  * 1) If RMW and it actually read from something, then we
1373                                  * already have all relevant edges, so just skip to next
1374                                  * thread.
1375                                  *
1376                                  * 2) If RMW and it didn't read from anything, we should add
1377                                  * whatever edge we can get to speed up convergence.
1378                                  *
1379                                  * 3) If normal write, we need to look at earlier actions, so
1380                                  * continue processing list.
1381                                  */
1382                                 if (curr->is_rmw()) {
1383                                         if (curr->get_reads_from()!=NULL)
1384                                                 break;
1385                                         else
1386                                                 continue;
1387                                 } else
1388                                         continue;
1389                         }
1390
1391                         /*
1392                          * Include at most one act per-thread that "happens
1393                          * before" curr
1394                          */
1395                         if (act->happens_before(curr)) {
1396                                 /*
1397                                  * Note: if act is RMW, just add edge:
1398                                  *   act --mo--> curr
1399                                  * The following edge should be handled elsewhere:
1400                                  *   readfrom(act) --mo--> act
1401                                  */
1402                                 if (act->is_write())
1403                                         mo_graph->addEdge(act, curr);
1404                                 else if (act->is_read()) {
1405                                         //if the previous read's reads-from is unresolved, just keep going
1406                                         if (act->get_reads_from() == NULL)
1407                                                 continue;
1408                                         mo_graph->addEdge(act->get_reads_from(), curr);
1409                                 }
1410                                 added = true;
1411                                 break;
1412                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1413                                                      !act->same_thread(curr)) {
1414                                 /* We have an action that:
1415                                    (1) did not happen before us
1416                                    (2) is a read and we are a write
1417                                    (3) cannot synchronize with us
1418                                    (4) is in a different thread
1419                                    =>
1420                                    that read could potentially read from our write.  Note that
1421                                    these checks are overly conservative at this point; we'll
1422                                    do more checks before actually removing the
1423                                    pendingfuturevalue.
1424
1425                                  */
1426                                 if (thin_air_constraint_may_allow(curr, act)) {
1427                                         if (isfeasible() ||
1428                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1429                                                 struct PendingFutureValue pfv = {curr,act};
1430                                                 futurevalues->push_back(pfv);
1431                                         }
1432                                 }
1433                         }
1434                 }
1435         }
1436
1437         return added;
1438 }
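
/*
 * Illustrative sketch (hypothetical client code) of case (2) above: when
 * exploring W2, the earlier load in the same thread read from W1, so W1
 * must precede W2 in x's modification order.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_relaxed);      // W1
 *   // Thread 2: int r = x.load(std::memory_order_relaxed);  // reads W1
 *   //           x.store(2, std::memory_order_relaxed);      // W2: edge W1 --mo--> W2
 */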
1439
1440 /** Arbitrary reads from the future are not allowed.  Section 29.3
1441  * part 9 places some constraints.  This method checks one of those
1442  * constraints.  Others require compiler support. */
1443 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1444         if (!writer->is_rmw())
1445                 return true;
1446
1447         if (!reader->is_rmw())
1448                 return true;
1449
1450         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1451                 if (search == reader)
1452                         return false;
1453                 if (search->get_tid() == reader->get_tid() &&
1454                                 search->happens_before(reader))
1455                         break;
1456         }
1457
1458         return true;
1459 }
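
/*
 * Illustrative sketch: the check above rejects an RMW reading from an
 * RMW whose own reads-from chain already contains it, which would form
 * an out-of-thin-air style cycle.  For example, two concurrent
 * fetch_add operations on the same atomic cannot each read the other's
 * result:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.fetch_add(1, std::memory_order_relaxed);  // A
 *   // Thread 2: x.fetch_add(1, std::memory_order_relaxed);  // B
 *   // Disallowed: A reads from B while B reads from A.
 */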
1460
1461 /**
1462  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1463  * some constraints. This method checks the following constraint (others
1464  * require compiler support):
1465  *
1466  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1467  */
1468 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1469 {
1470         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1471         unsigned int i;
1472         /* Iterate over all threads */
1473         for (i = 0; i < thrd_lists->size(); i++) {
1474                 const ModelAction *write_after_read = NULL;
1475
1476                 /* Iterate over actions in thread, starting from most recent */
1477                 action_list_t *list = &(*thrd_lists)[i];
1478                 action_list_t::reverse_iterator rit;
1479                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1480                         ModelAction *act = *rit;
1481
1482                         if (!reader->happens_before(act))
1483                                 break;
1484                         else if (act->is_write())
1485                                 write_after_read = act;
1486                         else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1487                                 write_after_read = act->get_reads_from();
1488                         }
1489                 }
1490
1491                 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1492                         return false;
1493         }
1494         return true;
1495 }
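
/*
 * Illustrative sketch of the constraint checked above (hypothetical
 * client code): X happens before the write Y, so X must read from a
 * write that precedes Y in the modification order; Z follows Y, so
 * letting X read from Z is rejected.
 *
 *   std::atomic<int> x(0);
 *   // X: int r = x.load(std::memory_order_relaxed);
 *   // Y: x.store(1, std::memory_order_relaxed);   // X --hb--> Y
 *   // Z: x.store(2, std::memory_order_relaxed);   // Y --mo--> Z
 *   // Reading the value 2 (i.e., X reading from Z) is forbidden.
 */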
1496
1497 /**
1498  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1499  * The ModelAction under consideration is expected to be taking part in
1500  * release/acquire synchronization as an object of the "reads from" relation.
1501  * Note that this can only provide release sequence support for RMW chains
1502  * which do not read from the future, as those actions cannot be traced until
1503  * their "promise" is fulfilled. Similarly, we may not even establish the
1504  * presence of a release sequence with certainty, as some modification order
1505  * constraints may be decided further in the future. Thus, this function
1506  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1507  * and a boolean representing certainty.
1508  *
1509  * @param rf The action that might be part of a release sequence. Must be a
1510  * write.
1511  * @param release_heads A pass-by-reference style return parameter. After
1512  * execution of this function, release_heads will contain the heads of all the
1513  * relevant release sequences, if any exist with certainty.
1514  * @param pending A pass-by-reference style return parameter which is only used
1515  * when returning false (i.e., uncertain). Returns information regarding
1516  * an uncertain release sequence, including any write operations that might
1517  * break the sequence.
1518  * @return true, if the ModelChecker is certain that release_heads is complete;
1519  * false otherwise
1520  */
1521 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1522                 rel_heads_list_t *release_heads,
1523                 struct release_seq *pending) const
1524 {
1525         /* Only check for release sequences if there are no cycles */
1526         if (mo_graph->checkForCycles())
1527                 return false;
1528
1529         while (rf) {
1530                 ASSERT(rf->is_write());
1531
1532                 if (rf->is_release())
1533                         release_heads->push_back(rf);
1534                 if (!rf->is_rmw())
1535                         break; /* End of RMW chain */
1536
1537                 /** @todo Need to be smarter here...  In the linux lock
1538                  * example, this will run to the beginning of the program for
1539                  * every acquire. */
1540                 /** @todo The way to be smarter here is to keep going until one
1541                  * thread has a release preceded by an acquire and you've seen
1542                  * both. */
1543
1544                 /* acq_rel RMW is a sufficient stopping condition */
1545                 if (rf->is_acquire() && rf->is_release())
1546                         return true; /* complete */
1547
1548                 rf = rf->get_reads_from();
1549         };
1550         if (!rf) {
1551                 /* read from future: need to settle this later */
1552                 pending->rf = NULL;
1553                 return false; /* incomplete */
1554         }
1555
1556         if (rf->is_release())
1557                 return true; /* complete */
1558
1559         /* else relaxed write; check modification order for contiguous subsequence
1560          * -> rf must be same thread as release */
1561         int tid = id_to_int(rf->get_tid());
1562         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1563         action_list_t *list = &(*thrd_lists)[tid];
1564         action_list_t::const_reverse_iterator rit;
1565
1566         /* Find rf in the thread list */
1567         rit = std::find(list->rbegin(), list->rend(), rf);
1568         ASSERT(rit != list->rend());
1569
1570         /* Find the most recent write-release preceding rf in this thread */
1571         for (; rit != list->rend(); rit++)
1572                 if ((*rit)->is_release())
1573                         break;
1574         if (rit == list->rend()) {
1575                 /* No write-release in this thread */
1576                 return true; /* complete */
1577         }
1578         ModelAction *release = *rit;
1579
1580         ASSERT(rf->same_thread(release));
1581
1582         pending->writes.clear();
1583
1584         bool certain = true;
1585         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1586                 if (id_to_int(rf->get_tid()) == (int)i)
1587                         continue;
1588                 list = &(*thrd_lists)[i];
1589
1590                 /* Can we ensure no future writes from this thread may break
1591                  * the release seq? */
1592                 bool future_ordered = false;
1593
1594                 ModelAction *last = get_last_action(int_to_id(i));
1595                 Thread *th = get_thread(int_to_id(i));
1596                 if ((last && rf->happens_before(last)) ||
1597                                 !is_enabled(th) ||
1598                                 th->is_complete())
1599                         future_ordered = true;
1600
1601                 ASSERT(!th->is_model_thread() || future_ordered);
1602
1603                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1604                         const ModelAction *act = *rit;
1605                         /* Reach synchronization -> this thread is complete */
1606                         if (act->happens_before(release))
1607                                 break;
1608                         if (rf->happens_before(act)) {
1609                                 future_ordered = true;
1610                                 continue;
1611                         }
1612
1613                         /* Only non-RMW writes can break release sequences */
1614                         if (!act->is_write() || act->is_rmw())
1615                                 continue;
1616
1617                         /* Check modification order */
1618                         if (mo_graph->checkReachable(rf, act)) {
1619                                 /* rf --mo--> act */
1620                                 future_ordered = true;
1621                                 continue;
1622                         }
1623                         if (mo_graph->checkReachable(act, release))
1624                                 /* act --mo--> release */
1625                                 break;
1626                         if (mo_graph->checkReachable(release, act) &&
1627                                       mo_graph->checkReachable(act, rf)) {
1628                                 /* release --mo-> act --mo--> rf */
1629                                 return true; /* complete */
1630                         }
1631                         /* act may break release sequence */
1632                         pending->writes.push_back(act);
1633                         certain = false;
1634                 }
1635                 if (!future_ordered)
1636                         certain = false; /* This thread is uncertain */
1637         }
1638
1639         if (certain) {
1640                 release_heads->push_back(release);
1641                 pending->writes.clear();
1642         } else {
1643                 pending->release = release;
1644                 pending->rf = rf;
1645         }
1646         return certain;
1647 }
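
/*
 * Illustrative sketch of a release sequence (hypothetical client code):
 * the relaxed fetch_add is an RMW and therefore extends the release
 * sequence headed by the release store, so an acquire load that reads
 * the RMW's result still synchronizes with the release store.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_release);       // head of the release sequence
 *   // Thread 2: x.fetch_add(1, std::memory_order_relaxed);   // RMW: stays in the sequence
 *   // Thread 3: int r = x.load(std::memory_order_acquire);   // reads 2
 *   //           -> synchronizes-with Thread 1's release store
 */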
1648
1649 /**
1650  * A public interface for getting the release sequence head(s) with which a
1651  * given ModelAction must synchronize. This function only returns a non-empty
1652  * result when it can locate a release sequence head with certainty. Otherwise,
1653  * it may mark the internal state of the ModelChecker so that it will handle
1654  * the release sequence at a later time, causing @a act to update its
1655  * synchronization at some later point in execution.
1656  * @param act The 'acquire' action that may read from a release sequence
1657  * @param release_heads A pass-by-reference return parameter. Will be filled
1658  * with the head(s) of the release sequence(s), if they exist with certainty.
1659  * @see ModelChecker::release_seq_heads
1660  */
1661 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1662 {
1663         const ModelAction *rf = act->get_reads_from();
1664         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1665         sequence->acquire = act;
1666
1667         if (!release_seq_heads(rf, release_heads, sequence)) {
1668                 /* add act to 'lazy checking' list */
1669                 pending_rel_seqs->push_back(sequence);
1670         } else {
1671                 snapshot_free(sequence);
1672         }
1673 }
1674
1675 /**
1676  * Attempt to resolve all stashed operations that might synchronize with a
1677  * release sequence for a given location. This implements the "lazy" portion of
1678  * determining whether or not a release sequence was contiguous, since not all
1679  * modification order information is present at the time an action occurs.
1680  *
1681  * @param location The location/object that should be checked for release
1682  * sequence resolutions. A NULL value means to check all locations.
1683  * @param work_queue The work queue to which to add work items as they are
1684  * generated
1685  * @return True if any updates occurred (new synchronization, new mo_graph
1686  * edges)
1687  */
1688 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1689 {
1690         bool updated = false;
1691         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1692         while (it != pending_rel_seqs->end()) {
1693                 struct release_seq *pending = *it;
1694                 ModelAction *act = pending->acquire;
1695
1696                 /* Only resolve sequences on the given location, if provided */
1697                 if (location && act->get_location() != location) {
1698                         it++;
1699                         continue;
1700                 }
1701
1702                 const ModelAction *rf = act->get_reads_from();
1703                 rel_heads_list_t release_heads;
1704                 bool complete;
1705                 complete = release_seq_heads(rf, &release_heads, pending);
1706                 for (unsigned int i = 0; i < release_heads.size(); i++) {
1707                         if (!act->has_synchronized_with(release_heads[i])) {
1708                                 if (act->synchronize_with(release_heads[i]))
1709                                         updated = true;
1710                                 else
1711                                         set_bad_synchronization();
1712                         }
1713                 }
1714
1715                 if (updated) {
1716                         /* Re-check all pending release sequences */
1717                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1718                         /* Re-check act for mo_graph edges */
1719                         work_queue->push_back(MOEdgeWorkEntry(act));
1720
1721                         /* propagate synchronization to later actions */
1722                         action_list_t::reverse_iterator rit = action_trace->rbegin();
1723                         for (; (*rit) != act; rit++) {
1724                                 ModelAction *propagate = *rit;
1725                                 if (act->happens_before(propagate)) {
1726                                         propagate->synchronize_with(act);
1727                                         /* Re-check 'propagate' for mo_graph edges */
1728                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
1729                                 }
1730                         }
1731                 }
1732                 if (complete) {
1733                         it = pending_rel_seqs->erase(it);
1734                         snapshot_free(pending);
1735                 } else {
1736                         it++;
1737                 }
1738         }
1739
1740         // If we resolved any release sequences, check whether we have realized a data race.
1741         if (checkDataRaces()) {
1742                 set_assert();
1743         }
1744
1745         return updated;
1746 }
1747
1748 /**
1749  * Performs various bookkeeping operations for the current ModelAction. For
1750  * instance, adds action to the per-object, per-thread action vector and to the
1751  * action trace list of all thread actions.
1752  *
1753  * @param act is the ModelAction to add.
1754  */
1755 void ModelChecker::add_action_to_lists(ModelAction *act)
1756 {
1757         int tid = id_to_int(act->get_tid());
1758         action_trace->push_back(act);
1759
1760         get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
1761
1762         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1763         if (tid >= (int)vec->size())
1764                 vec->resize(priv->next_thread_id);
1765         (*vec)[tid].push_back(act);
1766
1767         if ((int)thrd_last_action->size() <= tid)
1768                 thrd_last_action->resize(get_num_threads());
1769         (*thrd_last_action)[tid] = act;
1770
1771         if (act->is_wait()) {
1772                 void *mutex_loc=(void *) act->get_value();
1773                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1774
1775                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1776                 if (tid >= (int)vec->size())
1777                         vec->resize(priv->next_thread_id);
1778                 (*vec)[tid].push_back(act);
1779
1780                 if ((int)thrd_last_action->size() <= tid)
1781                         thrd_last_action->resize(get_num_threads());
1782                 (*thrd_last_action)[tid] = act;
1783         }
1784 }
1785
1786 /**
1787  * @brief Get the last action performed by a particular Thread
1788  * @param tid The thread ID of the Thread in question
1789  * @return The last action in the thread
1790  */
1791 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1792 {
1793         int threadid = id_to_int(tid);
1794         if (threadid < (int)thrd_last_action->size())
1795                 return (*thrd_last_action)[threadid];
1796         else
1797                 return NULL;
1798 }
1799
1800 /**
1801  * Gets the last memory_order_seq_cst write (in the total global sequence)
1802  * performed on a particular object (i.e., memory location), not including the
1803  * current action.
1804  * @param curr The current ModelAction; also denotes the object location to
1805  * check
1806  * @return The last seq_cst write
1807  */
1808 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1809 {
1810         void *location = curr->get_location();
1811         action_list_t *list = get_safe_ptr_action(obj_map, location);
1812         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1813         action_list_t::reverse_iterator rit;
1814         for (rit = list->rbegin(); rit != list->rend(); rit++)
1815                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1816                         return *rit;
1817         return NULL;
1818 }
1819
1820 /**
1821  * Gets the last unlock operation performed on a particular mutex (i.e., memory
1822  * location). This function identifies the mutex according to the current
1823  * action, which is presumed to perform on the same mutex.
1824  * action, which is presumed to operate on the same mutex.
1825  * check
1826  * @return The last unlock operation
1827  */
1828 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1829 {
1830         void *location = curr->get_location();
1831         action_list_t *list = get_safe_ptr_action(obj_map, location);
1832         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1833         action_list_t::reverse_iterator rit;
1834         for (rit = list->rbegin(); rit != list->rend(); rit++)
1835                 if ((*rit)->is_unlock() || (*rit)->is_wait())
1836                         return *rit;
1837         return NULL;
1838 }
1839
1840 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1841 {
1842         ModelAction *parent = get_last_action(tid);
1843         if (!parent)
1844                 parent = get_thread(tid)->get_creation();
1845         return parent;
1846 }
1847
1848 /**
1849  * Returns the clock vector for a given thread.
1850  * @param tid The thread whose clock vector we want
1851  * @return Desired clock vector
1852  */
1853 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1854 {
1855         return get_parent_action(tid)->get_cv();
1856 }
1857
1858 /**
1859  * Resolve a set of Promises with a current write. The set is provided in the
1860  * Node corresponding to @a write.
1861  * @param write The ModelAction that is fulfilling Promises
1862  * @return True if promises were resolved; false otherwise
1863  */
1864 bool ModelChecker::resolve_promises(ModelAction *write)
1865 {
1866         bool resolved = false;
1867         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
1868
1869         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1870                 Promise *promise = (*promises)[promise_index];
1871                 if (write->get_node()->get_promise(i)) {
1872                         ModelAction *read = promise->get_action();
1873                         if (read->is_rmw()) {
1874                                 mo_graph->addRMWEdge(write, read);
1875                         }
1876                         read->read_from(write);
1877                         //First fix up the modification order for actions that happened
1878                         //before the read
1879                         r_modification_order(read, write);
1880                         //Next fix up the modification order for actions that happened
1881                         //after the read.
1882                         post_r_modification_order(read, write);
1883                         //Make sure the promise's value matches the write's value
1884                         ASSERT(promise->get_value() == write->get_value());
1885                         delete(promise);
1886
1887                         promises->erase(promises->begin() + promise_index);
1888                         threads_to_check.push_back(read->get_tid());
1889
1890                         resolved = true;
1891                 } else
1892                         promise_index++;
1893         }
1894
1895         //Check whether reading these writes has made threads unable to
1896         //resolve promises
1897
1898         for(unsigned int i=0;i<threads_to_check.size();i++)
1899                 mo_check_promises(threads_to_check[i], write);
1900
1901         return resolved;
1902 }
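
/*
 * Illustrative sketch of a promise (hypothetical client code): the model
 * checker may let Thread 1's relaxed load speculatively return 1 before
 * Thread 2's store has executed.  The load then carries a promise for
 * the value 1, which this function resolves once the matching store
 * appears, fixing up reads-from and the modification order.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: int r = x.load(std::memory_order_relaxed);  // promised value: 1
 *   // Thread 2: x.store(1, std::memory_order_relaxed);      // fulfills the promise
 */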
1903
1904 /**
1905  * Compute the set of promises that could potentially be satisfied by this
1906  * action. Note that the set computation actually appears in the Node, not in
1907  * ModelChecker.
1908  * @param curr The ModelAction that may satisfy promises
1909  */
1910 void ModelChecker::compute_promises(ModelAction *curr)
1911 {
1912         for (unsigned int i = 0; i < promises->size(); i++) {
1913                 Promise *promise = (*promises)[i];
1914                 const ModelAction *act = promise->get_action();
1915                 if (!act->happens_before(curr) &&
1916                                 act->is_read() &&
1917                                 !act->could_synchronize_with(curr) &&
1918                                 !act->same_thread(curr) &&
1919                                 act->get_location() == curr->get_location() &&
1920                                 promise->get_value() == curr->get_value()) {
1921                         curr->get_node()->set_promise(i, act->is_rmw());
1922                 }
1923         }
1924 }
1925
1926 /** Checks promises in response to change in ClockVector Threads. */
1927 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1928 {
1929         for (unsigned int i = 0; i < promises->size(); i++) {
1930                 Promise *promise = (*promises)[i];
1931                 const ModelAction *act = promise->get_action();
1932                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1933                                 merge_cv->synchronized_since(act)) {
1934                         if (promise->increment_threads(tid)) {
1935                                 //Promise has failed
1936                                 failed_promise = true;
1937                                 return;
1938                         }
1939                 }
1940         }
1941 }
1942
1943 void ModelChecker::check_promises_thread_disabled() {
1944         for (unsigned int i = 0; i < promises->size(); i++) {
1945                 Promise *promise = (*promises)[i];
1946                 if (promise->check_promise()) {
1947                         failed_promise = true;
1948                         return;
1949                 }
1950         }
1951 }
1952
1953 /** Checks promises in response to an addition to the modification order for threads.
1954  * Definitions:
1955  * pthread is the thread that performed the read that created the promise
1956  *
1957  * pread is the read that created the promise
1958  *
1959  * pwrite is either the first write to the same location as pread by
1960  * pthread that is sequenced after pread, or the write read by the
1961  * first read to the same location as pread by pthread that is
1962  * sequenced after pread.
1963  *
1964  * 1. If tid == pthread, then we check what other threads are reachable
1965  * through the modification order starting with pwrite.  Those threads cannot
1966  * perform a write that will resolve the promise due to modification
1967  * order constraints.
1968  *
1969  * 2. If tid is not pthread, we check whether pwrite can reach the
1970  * action write through the modification order.  If so, that thread
1971  * cannot perform a future write that will resolve the promise due to
1972  * modification order constraints.
1973  *
1974  * @param tid The thread that either read from the model action
1975  * write, or actually did the model action write.
1976  *
1977  * @param write The ModelAction representing the relevant write.
1978  */
1979
1980 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1981         void * location = write->get_location();
1982         for (unsigned int i = 0; i < promises->size(); i++) {
1983                 Promise *promise = (*promises)[i];
1984                 const ModelAction *act = promise->get_action();
1985
1986                 //Is this promise on the same location?
1987                 if ( act->get_location() != location )
1988                         continue;
1989
1990                 //same thread as the promise
1991                 if ( act->get_tid()==tid ) {
1992
1993                         //do we have a pwrite for the promise, if not, set it
1994                         if (promise->get_write() == NULL ) {
1995                                 promise->set_write(write);
1996                                 //The pwrite cannot happen before the promise
1997                                 if (write->happens_before(act) && (write != act)) {
1998                                         failed_promise = true;
1999                                         return;
2000                                 }
2001                         }
2002                         if (mo_graph->checkPromise(write, promise)) {
2003                                 failed_promise = true;
2004                                 return;
2005                         }
2006                 }
2007
2008                 //Don't do any lookups twice for the same thread
2009                 if (promise->has_sync_thread(tid))
2010                         continue;
2011
2012                 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2013                         if (promise->increment_threads(tid)) {
2014                                 failed_promise = true;
2015                                 return;
2016                         }
2017                 }
2018         }
2019 }
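
/*
 * Illustrative sketch of rule 2 above (hypothetical client code): the
 * promised load in Thread 1 is sequenced before its own store (the
 * pwrite), so whatever the load eventually reads must be mo-ordered
 * before that pwrite.  If Thread 2's store is already reachable from the
 * pwrite in the modification order, any later write by Thread 2 would be
 * mo-ordered after the pwrite and thus can no longer satisfy the promise.
 *
 *   // Thread 1: int r = x.load(std::memory_order_relaxed);  // promise: r == 2
 *   //           x.store(5, std::memory_order_relaxed);      // pwrite
 *   // Thread 2: x.store(3, std::memory_order_relaxed);      // pwrite --mo--> this store
 *   //           (so Thread 2 is ruled out as the promise's supplier)
 */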
2020
2021 /**
2022  * Compute the set of writes that may break the current pending release
2023  * sequence. This information is extracted from previous release sequence
2024  * calculations.
2025  *
2026  * @param curr The current ModelAction. Must be a release sequence fixup
2027  * action.
2028  */
2029 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2030 {
2031         if (pending_rel_seqs->empty())
2032                 return;
2033
2034         struct release_seq *pending = pending_rel_seqs->back();
2035         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2036                 const ModelAction *write = pending->writes[i];
2037                 curr->get_node()->add_relseq_break(write);
2038         }
2039
2040         /* NULL means don't break the sequence; just synchronize */
2041         curr->get_node()->add_relseq_break(NULL);
2042 }
2043
2044 /**
2045  * Build up an initial set of all past writes that this 'read' action may read
2046  * from. This set is determined by the clock vector's "happens before"
2047  * relationship.
2048  * @param curr is the current ModelAction that we are exploring; it must be a
2049  * 'read' operation.
2050  */
2051 void ModelChecker::build_reads_from_past(ModelAction *curr)
2052 {
2053         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2054         unsigned int i;
2055         ASSERT(curr->is_read());
2056
2057         ModelAction *last_seq_cst = NULL;
2058
2059         /* Track whether this object has been initialized */
2060         bool initialized = false;
2061
2062         if (curr->is_seqcst()) {
2063                 last_seq_cst = get_last_seq_cst(curr);
2064                 /* We have to at least see the last sequentially consistent write,
2065                          so we are initialized. */
2066                 if (last_seq_cst != NULL)
2067                         initialized = true;
2068         }
2069
2070         /* Iterate over all threads */
2071         for (i = 0; i < thrd_lists->size(); i++) {
2072                 /* Iterate over actions in thread, starting from most recent */
2073                 action_list_t *list = &(*thrd_lists)[i];
2074                 action_list_t::reverse_iterator rit;
2075                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2076                         ModelAction *act = *rit;
2077
2078                         /* Only consider 'write' actions */
2079                         if (!act->is_write() || act == curr)
2080                                 continue;
2081
2082                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2083                         if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
2084                                 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2085                                         DEBUG("Adding action to may_read_from:\n");
2086                                         if (DBG_ENABLED()) {
2087                                                 act->print();
2088                                                 curr->print();
2089                                         }
2090                                         curr->get_node()->add_read_from(act);
2091                                 }
2092                         }
2093
2094                         /* Include at most one act per-thread that "happens before" curr */
2095                         if (act->happens_before(curr)) {
2096                                 initialized = true;
2097                                 break;
2098                         }
2099                 }
2100         }
2101
2102         if (!initialized) {
2103                 /** @todo Need a more informative way of reporting errors. */
2104                 printf("ERROR: may read from uninitialized atomic\n");
2105                 set_assert();
2106         }
2107
2108         if (DBG_ENABLED() || !initialized) {
2109                 printf("Reached read action:\n");
2110                 curr->print();
2111                 printf("Printing may_read_from\n");
2112                 curr->get_node()->print_may_read_from();
2113                 printf("End printing may_read_from\n");
2114         }
2115 }
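
/*
 * Illustrative sketch (hypothetical client code): for the load below, the
 * may-read-from set contains both the initializing store, which happens
 * before the load, and Thread 1's concurrent store, which neither
 * happens before the load nor is hidden behind a later write.
 *
 *   std::atomic<int> x(0);                                   // W0
 *   // Thread 1: x.store(1, std::memory_order_relaxed);      // W1, concurrent with the load
 *   // Thread 2: int r = x.load(std::memory_order_relaxed);  // may read from W0 or W1
 */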
2116
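/**
 * Determine whether a read whose thread is in the sleep set may read from
 * the given write.  Walks the RMW chain backwards from @a write: returns
 * true if some write in the chain is a release that was performed while
 * the reading thread was in the sleep set (or if the chain's reads-from
 * is still unresolved); returns false once a non-RMW write is reached
 * without finding such a release.
 */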
2117 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2118         while(true) {
2119                 Node *prevnode=write->get_node()->get_parent();
2120
2121                 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2122                 if (write->is_release()&&thread_sleep)
2123                         return true;
2124                 if (!write->is_rmw()) {
2125                         return false;
2126                 }
2127                 if (write->get_reads_from()==NULL)
2128                         return true;
2129                 write=write->get_reads_from();
2130         }
2131 }
2132
2133 static void print_list(action_list_t *list)
2134 {
2135         action_list_t::iterator it;
2136
2137         printf("---------------------------------------------------------------------\n");
2138         printf("Trace:\n");
2139         unsigned int hash=0;
2140
2141         for (it = list->begin(); it != list->end(); it++) {
2142                 (*it)->print();
2143                 hash=hash^(hash<<3)^((*it)->hash());
2144         }
2145         printf("HASH %u\n", hash);
2146         printf("---------------------------------------------------------------------\n");
2147 }
2148
2149 #if SUPPORT_MOD_ORDER_DUMP
2150 void ModelChecker::dumpGraph(char *filename) {
2151         char buffer[200];
2152         sprintf(buffer, "%s.dot",filename);
2153         FILE *file=fopen(buffer, "w");
2154         fprintf(file, "digraph %s {\n",filename);
2155         mo_graph->dumpNodes(file);
2156         ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2157
2158         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2159                 ModelAction *action=*it;
2160                 if (action->is_read()) {
2161                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2162                         if (action->get_reads_from()!=NULL)
2163                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2164                 }
2165                 if (thread_array[action->get_tid()] != NULL) {
2166                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2167                 }
2168
2169                 thread_array[action->get_tid()]=action;
2170         }
2171         fprintf(file,"}\n");
2172         model_free(thread_array);
2173         fclose(file);
2174 }
2175 #endif
2176
2177 void ModelChecker::print_summary()
2178 {
2179         printf("\n");
2180         printf("Number of executions: %d\n", num_executions);
2181         printf("Number of feasible executions: %d\n", num_feasible_executions);
2182         printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2183
2184 #if SUPPORT_MOD_ORDER_DUMP
2185         scheduler->print();
2186         char buffername[100];
2187         sprintf(buffername, "exec%04u", num_executions);
2188         mo_graph->dumpGraphToFile(buffername);
2189         sprintf(buffername, "graph%04u", num_executions);
2190         dumpGraph(buffername);
2191 #endif
2192
2193         if (!isfinalfeasible())
2194                 printf("INFEASIBLE EXECUTION!\n");
2195         print_list(action_trace);
2196         printf("\n");
2197 }
2198
2199 /**
2200  * Add a Thread to the system for the first time. Should only be called once
2201  * per thread.
2202  * @param t The Thread to add
2203  */
2204 void ModelChecker::add_thread(Thread *t)
2205 {
2206         thread_map->put(id_to_int(t->get_id()), t);
2207         scheduler->add_thread(t);
2208 }
2209
2210 /**
2211  * Removes a thread from the scheduler.
2212  * @param t The Thread to remove.
2213  */
2214 void ModelChecker::remove_thread(Thread *t)
2215 {
2216         scheduler->remove_thread(t);
2217 }
2218
2219 /**
2220  * @brief Get a Thread reference by its ID
2221  * @param tid The Thread's ID
2222  * @return A Thread reference
2223  */
2224 Thread * ModelChecker::get_thread(thread_id_t tid) const
2225 {
2226         return thread_map->get(id_to_int(tid));
2227 }
2228
2229 /**
2230  * @brief Get a reference to the Thread in which a ModelAction was executed
2231  * @param act The ModelAction
2232  * @return A Thread reference
2233  */
2234 Thread * ModelChecker::get_thread(ModelAction *act) const
2235 {
2236         return get_thread(act->get_tid());
2237 }
2238
2239 /**
2240  * @brief Check if a Thread is currently enabled
2241  * @param t The Thread to check
2242  * @return True if the Thread is currently enabled
2243  */
2244 bool ModelChecker::is_enabled(Thread *t) const
2245 {
2246         return scheduler->is_enabled(t);
2247 }
2248
2249 /**
2250  * @brief Check if a Thread is currently enabled
2251  * @param tid The ID of the Thread to check
2252  * @return True if the Thread is currently enabled
2253  */
2254 bool ModelChecker::is_enabled(thread_id_t tid) const
2255 {
2256         return scheduler->is_enabled(tid);
2257 }
2258
2259 /**
2260  * Switch from a user-context to the "master thread" context (a.k.a. system
2261  * context). This switch is made with the intention of exploring a particular
2262  * model-checking action (described by a ModelAction object). Must be called
2263  * from a user-thread context.
2264  *
2265  * @param act The current action that will be explored. May be NULL only if
2266  * trace is exiting via an assertion (see ModelChecker::set_assert and
2267  * ModelChecker::has_asserted).
2268  * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2269  */
2270 int ModelChecker::switch_to_master(ModelAction *act)
2271 {
2272         DBG();
2273         Thread *old = thread_current();
2274         set_current_action(act);
2275         old->set_state(THREAD_READY);
2276         return Thread::swap(old, &system_context);
2277 }
2278
2279 /**
2280  * Takes the next step in the execution, if possible.
2281  * @return Returns true (success) if a step was taken and false otherwise.
2282  */
2283 bool ModelChecker::take_step() {
2284         if (has_asserted())
2285                 return false;
2286
2287         Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2288         if (curr) {
2289                 if (curr->get_state() == THREAD_READY) {
2290                         ASSERT(priv->current_action);
2291
2292                         priv->nextThread = check_current_action(priv->current_action);
2293                         priv->current_action = NULL;
2294
2295                         if (curr->is_blocked() || curr->is_complete())
2296                                 scheduler->remove_thread(curr);
2297                 } else {
2298                         ASSERT(false);
2299                 }
2300         }
2301         Thread *next = scheduler->next_thread(priv->nextThread);
2302
2303         /* Infeasible -> don't take any more steps */
2304         if (!isfeasible())
2305                 return false;
2306
2307         if (params.bound != 0) {
2308                 if (priv->used_sequence_numbers > params.bound) {
2309                         return false;
2310                 }
2311         }
2312
2313         DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2314                         next ? id_to_int(next->get_id()) : -1);
2315
2316         /*
2317          * Launch end-of-execution release sequence fixups only when there are:
2318          *
2319          * (1) no more user threads to run (or when execution replay chooses
2320          *     the 'model_thread')
2321          * (2) pending release sequences
2322          * (3) pending assertions (i.e., data races)
2323          * (4) no pending promises
2324          */
2325         if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2326                         isfinalfeasible() && !unrealizedraces.empty()) {
2327                 printf("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2328                                 pending_rel_seqs->size());
2329                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2330                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2331                                 model_thread);
2332                 set_current_action(fixup);
2333                 return true;
2334         }
2335
2336         /* next == NULL -> don't take any more steps */
2337         if (!next)
2338                 return false;
2339
2340         next->set_state(THREAD_RUNNING);
2341
2342         if (next->get_pending() != NULL) {
2343                 /* restart a pending action */
2344                 set_current_action(next->get_pending());
2345                 next->set_pending(NULL);
2346                 next->set_state(THREAD_READY);
2347                 return true;
2348         }
2349
2350         /* Return false only if swap fails with an error */
2351         return (Thread::swap(&system_context, next) == 0);
2352 }
2353
2354 /** Runs the current execution until there are no more steps to take. */
2355 void ModelChecker::finish_execution() {
2356         DBG();
2357
2358         while (take_step());
2359 }