[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4
5 #include "model.h"
6 #include "action.h"
7 #include "nodestack.h"
8 #include "schedule.h"
9 #include "snapshot-interface.h"
10 #include "common.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
13 #include "promise.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16
17 #define INITIAL_THREAD_ID       0
18
19 ModelChecker *model;
20
21 /**
22  * Structure for holding small ModelChecker members that should be snapshotted
23  */
24 struct model_snapshot_members {
25         ModelAction *current_action;
26         unsigned int next_thread_id;
27         modelclock_t used_sequence_numbers;
28         Thread *nextThread;
29         ModelAction *next_backtrack;
30 };
31
32 /** @brief Constructor */
33 ModelChecker::ModelChecker(struct model_params params) :
34         /* Initialize default scheduler */
35         params(params),
36         scheduler(new Scheduler()),
37         num_executions(0),
38         num_feasible_executions(0),
39         diverge(NULL),
40         earliest_diverge(NULL),
41         action_trace(new action_list_t()),
42         thread_map(new HashTable<int, Thread *, int>()),
43         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
44         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
45         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
46         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
47         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51         node_stack(new NodeStack()),
52         mo_graph(new CycleGraph()),
53         failed_promise(false),
54         too_many_reads(false),
55         asserted(false),
56         bad_synchronization(false)
57 {
58         /* Allocate this struct on the snapshotting heap */
59         priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
60         /* First thread created will have id INITIAL_THREAD_ID */
61         priv->next_thread_id = INITIAL_THREAD_ID;
62
63         /* Initialize a model-checker thread, for special ModelActions */
64         model_thread = new Thread(get_next_id());
65         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
66 }
67
68 /** @brief Destructor */
69 ModelChecker::~ModelChecker()
70 {
71         for (unsigned int i = 0; i < get_num_threads(); i++)
72                 delete thread_map->get(i);
73         delete thread_map;
74
75         delete obj_thrd_map;
76         delete obj_map;
77         delete lock_waiters_map;
78         delete condvar_waiters_map;
79         delete action_trace;
80
81         for (unsigned int i = 0; i < promises->size(); i++)
82                 delete (*promises)[i];
83         delete promises;
84
85         delete pending_rel_seqs;
86
87         delete thrd_last_action;
88         delete node_stack;
89         delete scheduler;
90         delete mo_graph;
91 }
92
93 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
94         action_list_t * tmp=hash->get(ptr);
95         if (tmp==NULL) {
96                 tmp=new action_list_t();
97                 hash->put(ptr, tmp);
98         }
99         return tmp;
100 }
101
102 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
103         std::vector<action_list_t> * tmp=hash->get(ptr);
104         if (tmp==NULL) {
105                 tmp=new std::vector<action_list_t>();
106                 hash->put(ptr, tmp);
107         }
108         return tmp;
109 }
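/*
 * Illustrative usage note (added comment, not from the original source):
 * both helpers lazily create the per-location container on first access,
 * so callers can append without a NULL check.  For a hypothetical action
 * 'act':
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);
 */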
110
111 /**
112  * Restores user program to initial state and resets all model-checker data
113  * structures.
114  */
115 void ModelChecker::reset_to_initial_state()
116 {
117         DEBUG("+++ Resetting to initial state +++\n");
118         node_stack->reset_execution();
119         failed_promise = false;
120         too_many_reads = false;
121         bad_synchronization = false;
122         reset_asserted();
123         snapshotObject->backTrackBeforeStep(0);
124 }
125
126 /** @return a thread ID for a new Thread */
127 thread_id_t ModelChecker::get_next_id()
128 {
129         return priv->next_thread_id++;
130 }
131
132 /** @return the number of user threads created during this execution */
133 unsigned int ModelChecker::get_num_threads() const
134 {
135         return priv->next_thread_id;
136 }
137
138 /** @return The currently executing Thread. */
139 Thread * ModelChecker::get_current_thread()
140 {
141         return scheduler->get_current_thread();
142 }
143
144 /** @return a sequence number for a new ModelAction */
145 modelclock_t ModelChecker::get_next_seq_num()
146 {
147         return ++priv->used_sequence_numbers;
148 }
149
150 Node * ModelChecker::get_curr_node() {
151         return node_stack->get_head();
152 }
153
154 /**
155  * @brief Choose the next thread to execute.
156  *
157  * This function chooses the next thread that should execute. It can force the
158  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
159  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
160  * The model-checker may have no preference regarding the next thread (i.e.,
161  * when exploring a new execution ordering), in which case this will return
162  * NULL.
163  * @param curr The current ModelAction. This action might guide the choice of
164  * next thread.
165  * @return The next thread to run. If the model-checker has no preference, NULL.
166  */
167 Thread * ModelChecker::get_next_thread(ModelAction *curr)
168 {
169         thread_id_t tid;
170
171         if (curr!=NULL) {
172                 /* Do not split atomic actions. */
173                 if (curr->is_rmwr())
174                         return thread_current();
175                 /* The THREAD_CREATE action points to the created Thread */
176                 else if (curr->get_type() == THREAD_CREATE)
177                         return (Thread *)curr->get_location();
178         }
179
180         /* Have we completed exploring the preselected path? */
181         if (diverge == NULL)
182                 return NULL;
183
184         /* Else, we are trying to replay an execution */
185         ModelAction *next = node_stack->get_next()->get_action();
186
187         if (next == diverge) {
188                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
189                         earliest_diverge=diverge;
190
191                 Node *nextnode = next->get_node();
192                 Node *prevnode = nextnode->get_parent();
193                 scheduler->update_sleep_set(prevnode);
194
195                 /* Reached divergence point */
196                 if (nextnode->increment_misc()) {
197                         /* The next node will try to satisfy a different misc_index value. */
198                         tid = next->get_tid();
199                         node_stack->pop_restofstack(2);
200                 } else if (nextnode->increment_promise()) {
201                         /* The next node will try to satisfy a different set of promises. */
202                         tid = next->get_tid();
203                         node_stack->pop_restofstack(2);
204                 } else if (nextnode->increment_read_from()) {
205                         /* The next node will read from a different value. */
206                         tid = next->get_tid();
207                         node_stack->pop_restofstack(2);
208                 } else if (nextnode->increment_future_value()) {
209                         /* The next node will try to read from a different future value. */
210                         tid = next->get_tid();
211                         node_stack->pop_restofstack(2);
212                 } else if (nextnode->increment_relseq_break()) {
213                         /* The next node will try to resolve a release sequence differently */
214                         tid = next->get_tid();
215                         node_stack->pop_restofstack(2);
216                 } else {
217                         /* Make a different thread execute for the next step */
218                         scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
219                         tid = prevnode->get_next_backtrack();
220                         /* Make sure the backtracked thread isn't sleeping. */
221                         node_stack->pop_restofstack(1);
222                         if (diverge==earliest_diverge) {
223                                 earliest_diverge=prevnode->get_action();
224                         }
225                 }
226                 /* The correct sleep set is in the parent node. */
227                 execute_sleep_set();
228
229                 DEBUG("*** Divergence point ***\n");
230
231                 diverge = NULL;
232         } else {
233                 tid = next->get_tid();
234         }
235         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
236         ASSERT(tid != THREAD_ID_T_NONE);
237         return thread_map->get(id_to_int(tid));
238 }
239
240 /**
241  * We need to know what the next actions of all threads in the sleep
242  * set will be.  This method computes them and stores each action in
243  * the corresponding thread object's pending-action slot.
244  */
245
246 void ModelChecker::execute_sleep_set() {
247         for(unsigned int i=0;i<get_num_threads();i++) {
248                 thread_id_t tid=int_to_id(i);
249                 Thread *thr=get_thread(tid);
250                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
251                                  thr->get_pending() == NULL ) {
252                         thr->set_state(THREAD_RUNNING);
253                         scheduler->next_thread(thr);
254                         Thread::swap(&system_context, thr);
255                         priv->current_action->set_sleep_flag();
256                         thr->set_pending(priv->current_action);
257                 }
258         }
259         priv->current_action = NULL;
260 }
261
262 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
263         for(unsigned int i=0;i<get_num_threads();i++) {
264                 thread_id_t tid=int_to_id(i);
265                 Thread *thr=get_thread(tid);
266                 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
267                         ModelAction *pending_act=thr->get_pending();
268                         if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
269                                 //Remove this thread from sleep set
270                                 scheduler->remove_sleep(thr);
271                         }
272                 }
273         }
274 }
275
276 /**
277  * Check if we are in a deadlock. Should only be called at the end of an
278  * execution, although it should not give false positives in the middle of an
279  * execution (there should be some ENABLED thread).
280  *
281  * @return True if program is in a deadlock; false otherwise
282  */
283 bool ModelChecker::is_deadlocked() const
284 {
285         bool blocking_threads = false;
286         for (unsigned int i = 0; i < get_num_threads(); i++) {
287                 thread_id_t tid = int_to_id(i);
288                 if (is_enabled(tid))
289                         return false;
290                 Thread *t = get_thread(tid);
291                 if (!t->is_model_thread() && t->get_pending())
292                         blocking_threads = true;
293         }
294         return blocking_threads;
295 }
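/*
 * Illustrative example (added comment, not from the original source): with
 * two user threads and two mutexes m1, m2,
 *
 *   T1: m1.lock(); m2.lock();
 *   T2: m2.lock(); m1.lock();
 *
 * an execution in which each thread acquires its first mutex leaves both
 * threads disabled with pending lock actions and no ENABLED thread, so
 * is_deadlocked() reports a deadlock at the end of that execution.
 */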
296
297 /**
298  * Check if this is a complete execution. That is, have all threads completed
299  * execution (rather than exiting because sleep sets have forced a redundant
300  * execution).
301  *
302  * @return True if the execution is complete.
303  */
304 bool ModelChecker::is_complete_execution() const
305 {
306         for (unsigned int i = 0; i < get_num_threads(); i++)
307                 if (is_enabled(int_to_id(i)))
308                         return false;
309         return true;
310 }
311
312 /**
313  * Queries the model-checker for more executions to explore and, if one
314  * exists, resets the model-checker state to execute a new execution.
315  *
316  * @return If there are more executions to explore, return true. Otherwise,
317  * return false.
318  */
319 bool ModelChecker::next_execution()
320 {
321         DBG();
322
323         num_executions++;
324
325         if (is_deadlocked())
326                 printf("ERROR: DEADLOCK\n");
327         if (isfinalfeasible()) {
328                 printf("Earliest divergence point since last feasible execution:\n");
329                 if (earliest_diverge)
330                         earliest_diverge->print();
331                 else
332                         printf("(Not set)\n");
333
334                 earliest_diverge = NULL;
335                 num_feasible_executions++;
336         }
337
338         DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
339                         pending_rel_seqs->size());
340
341
342         if (isfinalfeasible() || DBG_ENABLED()) {
343                 checkDataRaces();
344                 print_summary();
345         }
346
347         if ((diverge = get_next_backtrack()) == NULL)
348                 return false;
349
350         if (DBG_ENABLED()) {
351                 printf("Next execution will diverge at:\n");
352                 diverge->print();
353         }
354
355         reset_to_initial_state();
356         return true;
357 }
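/*
 * Sketch of the expected driver loop (added comment; the real loop lives in
 * the model checker's system context, outside this file):
 *
 *   do {
 *           // run the user program to completion under the scheduler
 *   } while (model->next_execution());
 *
 * next_execution() returns false once get_next_backtrack() yields no more
 * divergence points, i.e., the explored state space is exhausted.
 */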
358
359 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
360 {
361         switch (act->get_type()) {
362         case ATOMIC_READ:
363         case ATOMIC_WRITE:
364         case ATOMIC_RMW: {
365                 /* linear search: from most recent to oldest */
366                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
367                 action_list_t::reverse_iterator rit;
368                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
369                         ModelAction *prev = *rit;
370                         if (prev->could_synchronize_with(act))
371                                 return prev;
372                 }
373                 break;
374         }
375         case ATOMIC_LOCK:
376         case ATOMIC_TRYLOCK: {
377                 /* linear search: from most recent to oldest */
378                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
379                 action_list_t::reverse_iterator rit;
380                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
381                         ModelAction *prev = *rit;
382                         if (act->is_conflicting_lock(prev))
383                                 return prev;
384                 }
385                 break;
386         }
387         case ATOMIC_UNLOCK: {
388                 /* linear search: from most recent to oldest */
389                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
390                 action_list_t::reverse_iterator rit;
391                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
392                         ModelAction *prev = *rit;
393                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
394                                 return prev;
395                 }
396                 break;
397         }
398         case ATOMIC_WAIT: {
399                 /* linear search: from most recent to oldest */
400                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
401                 action_list_t::reverse_iterator rit;
402                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
403                         ModelAction *prev = *rit;
404                         if (!act->same_thread(prev)&&prev->is_failed_trylock())
405                                 return prev;
406                         if (!act->same_thread(prev)&&prev->is_notify())
407                                 return prev;
408                 }
409                 break;
410         }
411
412         case ATOMIC_NOTIFY_ALL:
413         case ATOMIC_NOTIFY_ONE: {
414                 /* linear search: from most recent to oldest */
415                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
416                 action_list_t::reverse_iterator rit;
417                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
418                         ModelAction *prev = *rit;
419                         if (!act->same_thread(prev)&&prev->is_wait())
420                                 return prev;
421                 }
422                 break;
423         }
424         default:
425                 break;
426         }
427         return NULL;
428 }
429
430 /** This method finds backtracking points: previous conflicting actions
431  * against which we should try to reorder the parameter ModelAction.
432  *
433  * @param act The ModelAction to find backtracking points for.
434  */
435 void ModelChecker::set_backtracking(ModelAction *act)
436 {
437         Thread *t = get_thread(act);
438         ModelAction * prev = get_last_conflict(act);
439         if (prev == NULL)
440                 return;
441
442         Node * node = prev->get_node()->get_parent();
443
444         int low_tid, high_tid;
445         if (node->is_enabled(t)) {
446                 low_tid = id_to_int(act->get_tid());
447                 high_tid = low_tid+1;
448         } else {
449                 low_tid = 0;
450                 high_tid = get_num_threads();
451         }
452
453         for(int i = low_tid; i < high_tid; i++) {
454                 thread_id_t tid = int_to_id(i);
455
456                 /* Make sure this thread can be enabled here. */
457                 if (i >= node->get_num_threads())
458                         break;
459
460                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
461                 if (node->enabled_status(tid)!=THREAD_ENABLED)
462                         continue;
463
464                 /* Check if this has been explored already */
465                 if (node->has_been_explored(tid))
466                         continue;
467
468                 /* See if fairness allows */
469                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
470                         bool unfair=false;
471                         for(int t=0;t<node->get_num_threads();t++) {
472                                 thread_id_t tother=int_to_id(t);
473                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
474                                         unfair=true;
475                                         break;
476                                 }
477                         }
478                         if (unfair)
479                                 continue;
480                 }
481                 /* Cache the latest backtracking point */
482                 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
483                         priv->next_backtrack = prev;
484
485                 /* If this is a new backtracking point, mark the tree */
486                 if (!node->set_backtrack(tid))
487                         continue;
488                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
489                                         id_to_int(prev->get_tid()),
490                                         id_to_int(t->get_id()));
491                 if (DBG_ENABLED()) {
492                         prev->print();
493                         act->print();
494                 }
495         }
496 }
497
498 /**
499  * Returns last backtracking point. The model checker will explore a different
500  * path for this point in the next execution.
501  * @return The ModelAction at which the next execution should diverge.
502  */
503 ModelAction * ModelChecker::get_next_backtrack()
504 {
505         ModelAction *next = priv->next_backtrack;
506         priv->next_backtrack = NULL;
507         return next;
508 }
509
510 /**
511  * Processes a read or rmw model action.
512  * @param curr is the read model action to process.
513  * @param second_part_of_rmw is true if this is the second part of an RMW.
514  * @return True if processing this read updates the mo_graph.
515  */
516 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
517 {
518         uint64_t value = VALUE_NONE;
519         bool updated = false;
520         while (true) {
521                 const ModelAction *reads_from = curr->get_node()->get_read_from();
522                 if (reads_from != NULL) {
523                         mo_graph->startChanges();
524
525                         value = reads_from->get_value();
526                         bool r_status = false;
527
528                         if (!second_part_of_rmw) {
529                                 check_recency(curr, reads_from);
530                                 r_status = r_modification_order(curr, reads_from);
531                         }
532
533
534                         if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
535                                 mo_graph->rollbackChanges();
536                                 too_many_reads = false;
537                                 continue;
538                         }
539
540                         curr->read_from(reads_from);
541                         mo_graph->commitChanges();
542                         mo_check_promises(curr->get_tid(), reads_from);
543
544                         updated |= r_status;
545                 } else if (!second_part_of_rmw) {
546                         /* Read from future value */
547                         value = curr->get_node()->get_future_value();
548                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
549                         curr->read_from(NULL);
550                         Promise *valuepromise = new Promise(curr, value, expiration);
551                         promises->push_back(valuepromise);
552                 }
553                 get_thread(curr)->set_return_value(value);
554                 return updated;
555         }
556 }
557
558 /**
559  * Processes a lock, trylock, unlock, wait, or notify model action.
560  * @param curr is the mutex-related model action to process.
561  *
562  * The trylock operation checks whether the lock is taken.  If not, it
563  * falls through to the normal lock case.  If so, it fails and the
564  * thread's return value is set to 0.
565  *
566  * The lock operation has already been checked that it is enabled, so
567  * it just grabs the lock and synchronizes with the previous unlock.
568  *
569  * The unlock operation has to re-enable all of the threads that are
570  * waiting on the lock.
571  *
572  * @return True if synchronization was updated; false otherwise
573  */
574 bool ModelChecker::process_mutex(ModelAction *curr) {
575         std::mutex *mutex=NULL;
576         struct std::mutex_state *state=NULL;
577
578         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
579                 mutex = (std::mutex *)curr->get_location();
580                 state = mutex->get_state();
581         } else if(curr->is_wait()) {
582                 mutex = (std::mutex *)curr->get_value();
583                 state = mutex->get_state();
584         }
585
586         switch (curr->get_type()) {
587         case ATOMIC_TRYLOCK: {
588                 bool success = !state->islocked;
589                 curr->set_try_lock(success);
590                 if (!success) {
591                         get_thread(curr)->set_return_value(0);
592                         break;
593                 }
594                 get_thread(curr)->set_return_value(1);
595         }
596                 //otherwise fall through to the lock case
597         case ATOMIC_LOCK: {
598                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
599                         printf("Lock access before initialization\n");
600                         set_assert();
601                 }
602                 state->islocked = true;
603                 ModelAction *unlock = get_last_unlock(curr);
604                 //synchronize with the previous unlock statement
605                 if (unlock != NULL) {
606                         curr->synchronize_with(unlock);
607                         return true;
608                 }
609                 break;
610         }
611         case ATOMIC_UNLOCK: {
612                 //unlock the lock
613                 state->islocked = false;
614                 //wake up the other threads
615                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
616                 //activate all the waiting threads
617                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
618                         scheduler->wake(get_thread(*rit));
619                 }
620                 waiters->clear();
621                 break;
622         }
623         case ATOMIC_WAIT: {
624                 //unlock the lock
625                 state->islocked = false;
626                 //wake up the other threads
627                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
628                 //activate all the waiting threads
629                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
630                         scheduler->wake(get_thread(*rit));
631                 }
632                 waiters->clear();
633                 //check whether we should go to sleep or not...simulate spurious failures
634                 if (curr->get_node()->get_misc()==0) {
635                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
636                         //disable us
637                         scheduler->sleep(get_current_thread());
638                 }
639                 break;
640         }
641         case ATOMIC_NOTIFY_ALL: {
642                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
643                 //activate all the waiting threads
644                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
645                         scheduler->wake(get_thread(*rit));
646                 }
647                 waiters->clear();
648                 break;
649         }
650         case ATOMIC_NOTIFY_ONE: {
651                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
652                 int wakeupthread=curr->get_node()->get_misc();
653                 action_list_t::iterator it = waiters->begin();
654                 advance(it, wakeupthread);
655                 scheduler->wake(get_thread(*it));
656                 waiters->erase(it);
657                 break;
658         }
659
660         default:
661                 ASSERT(0);
662         }
663         return false;
664 }
665
666 /**
667  * Process a write ModelAction
668  * @param curr The ModelAction to process
669  * @return True if the mo_graph was updated or promises were resolved
670  */
671 bool ModelChecker::process_write(ModelAction *curr)
672 {
673         bool updated_mod_order = w_modification_order(curr);
674         bool updated_promises = resolve_promises(curr);
675
676         if (promises->size() == 0) {
677                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
678                         struct PendingFutureValue pfv = (*futurevalues)[i];
679                         //Do more ambitious checks now that mo is more complete
680                         if (mo_may_allow(pfv.writer, pfv.act)&&
681                                         pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
682                                         (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
683                                 priv->next_backtrack = pfv.act;
684                 }
685                 futurevalues->resize(0);
686         }
687
688         mo_graph->commitChanges();
689         mo_check_promises(curr->get_tid(), curr);
690
691         get_thread(curr)->set_return_value(VALUE_NONE);
692         return updated_mod_order || updated_promises;
693 }
694
695 /**
696  * @brief Process the current action for thread-related activity
697  *
698  * Performs current-action processing for a THREAD_* ModelAction. Processing
699  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
700  * synchronization, etc.  This function is a no-op for non-THREAD actions
701  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
702  *
703  * @param curr The current action
704  * @return True if synchronization was updated or a thread completed
705  */
706 bool ModelChecker::process_thread_action(ModelAction *curr)
707 {
708         bool updated = false;
709
710         switch (curr->get_type()) {
711         case THREAD_CREATE: {
712                 Thread *th = (Thread *)curr->get_location();
713                 th->set_creation(curr);
714                 break;
715         }
716         case THREAD_JOIN: {
717                 Thread *blocking = (Thread *)curr->get_location();
718                 ModelAction *act = get_last_action(blocking->get_id());
719                 curr->synchronize_with(act);
720                 updated = true; /* trigger rel-seq checks */
721                 break;
722         }
723         case THREAD_FINISH: {
724                 Thread *th = get_thread(curr);
725                 while (!th->wait_list_empty()) {
726                         ModelAction *act = th->pop_wait_list();
727                         scheduler->wake(get_thread(act));
728                 }
729                 th->complete();
730                 updated = true; /* trigger rel-seq checks */
731                 break;
732         }
733         case THREAD_START: {
734                 check_promises(curr->get_tid(), NULL, curr->get_cv());
735                 break;
736         }
737         default:
738                 break;
739         }
740
741         return updated;
742 }
743
744 /**
745  * @brief Process the current action for release sequence fixup activity
746  *
747  * Performs model-checker release sequence fixups for the current action,
748  * forcing a single pending release sequence to break (with a given, potential
749  * "loose" write) or to complete (i.e., synchronize). If a pending release
750  * sequence forms a complete release sequence, then we must perform the fixup
751  * synchronization, mo_graph additions, etc.
752  *
753  * @param curr The current action; must be a release sequence fixup action
754  * @param work_queue The work queue to which to add work items as they are
755  * generated
756  */
757 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
758 {
759         const ModelAction *write = curr->get_node()->get_relseq_break();
760         struct release_seq *sequence = pending_rel_seqs->back();
761         pending_rel_seqs->pop_back();
762         ASSERT(sequence);
763         ModelAction *acquire = sequence->acquire;
764         const ModelAction *rf = sequence->rf;
765         const ModelAction *release = sequence->release;
766         ASSERT(acquire);
767         ASSERT(release);
768         ASSERT(rf);
769         ASSERT(release->same_thread(rf));
770
771         if (write == NULL) {
772                 /**
773                  * @todo Forcing a synchronization requires that we set
774                  * modification order constraints. For instance, we can't allow
775                  * a fixup sequence in which two separate read-acquire
776                  * operations read from the same sequence, where the first one
777                  * synchronizes and the other doesn't. Essentially, we can't
778                  * allow any writes to insert themselves between 'release' and
779                  * 'rf'
780                  */
781
782                 /* Must synchronize */
783                 if (!acquire->synchronize_with(release)) {
784                         set_bad_synchronization();
785                         return;
786                 }
787                 /* Re-check all pending release sequences */
788                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
789                 /* Re-check act for mo_graph edges */
790                 work_queue->push_back(MOEdgeWorkEntry(acquire));
791
792                 /* propagate synchronization to later actions */
793                 action_list_t::reverse_iterator rit = action_trace->rbegin();
794                 for (; (*rit) != acquire; rit++) {
795                         ModelAction *propagate = *rit;
796                         if (acquire->happens_before(propagate)) {
797                                 propagate->synchronize_with(acquire);
798                                 /* Re-check 'propagate' for mo_graph edges */
799                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
800                         }
801                 }
802         } else {
803                 /* Break release sequence with new edges:
804                  *   release --mo--> write --mo--> rf */
805                 mo_graph->addEdge(release, write);
806                 mo_graph->addEdge(write, rf);
807         }
808
809         /* See if we have realized a data race */
810         if (checkDataRaces())
811                 set_assert();
812 }
813
814 /**
815  * Initialize the current action by performing one or more of the following
816  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
817  * in the NodeStack, manipulating backtracking sets, allocating and
818  * initializing clock vectors, and computing the promises to fulfill.
819  *
820  * @param curr The current action, as passed from the user context; may be
821  * freed/invalidated after the execution of this function, with a different
822  * action "returned" in its place (pass-by-reference)
823  * @return True if curr is a newly-explored action; false otherwise
824  */
825 bool ModelChecker::initialize_curr_action(ModelAction **curr)
826 {
827         ModelAction *newcurr;
828
829         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
830                 newcurr = process_rmw(*curr);
831                 delete *curr;
832
833                 if (newcurr->is_rmw())
834                         compute_promises(newcurr);
835
836                 *curr = newcurr;
837                 return false;
838         }
839
840         (*curr)->set_seq_number(get_next_seq_num());
841
842         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
843         if (newcurr) {
844                 /* First restore type and order in case of RMW operation */
845                 if ((*curr)->is_rmwr())
846                         newcurr->copy_typeandorder(*curr);
847
848                 ASSERT((*curr)->get_location() == newcurr->get_location());
849                 newcurr->copy_from_new(*curr);
850
851                 /* Discard duplicate ModelAction; use action from NodeStack */
852                 delete *curr;
853
854                 /* Always compute new clock vector */
855                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
856
857                 *curr = newcurr;
858                 return false; /* Action was explored previously */
859         } else {
860                 newcurr = *curr;
861
862                 /* Always compute new clock vector */
863                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
864                 /*
865                  * Perform one-time actions when pushing new ModelAction onto
866                  * NodeStack
867                  */
868                 if (newcurr->is_write())
869                         compute_promises(newcurr);
870                 else if (newcurr->is_relseq_fixup())
871                         compute_relseq_breakwrites(newcurr);
872                 else if (newcurr->is_wait())
873                         newcurr->get_node()->set_misc_max(2);
874                 else if (newcurr->is_notify_one()) {
875                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
876                 }
877                 return true; /* This was a new ModelAction */
878         }
879 }
880
881 /**
882  * @brief Check whether a model action is enabled.
883  *
884  * Checks whether a lock or join operation would be successful (i.e., that the
885  * lock is not already held and that the joined thread has completed). If the
886  * operation would block, put the action in a waiter list.
887  *
888  * @param curr is the ModelAction to check whether it is enabled.
889  * @return a bool that indicates whether the action is enabled.
890  */
891 bool ModelChecker::check_action_enabled(ModelAction *curr) {
892         if (curr->is_lock()) {
893                 std::mutex * lock = (std::mutex *)curr->get_location();
894                 struct std::mutex_state * state = lock->get_state();
895                 if (state->islocked) {
896                         //Stick the action in the appropriate waiting queue
897                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
898                         return false;
899                 }
900         } else if (curr->get_type() == THREAD_JOIN) {
901                 Thread *blocking = (Thread *)curr->get_location();
902                 if (!blocking->is_complete()) {
903                         blocking->push_wait_list(curr);
904                         return false;
905                 }
906         }
907
908         return true;
909 }
910
911 /**
912  * Stores the ModelAction for the current thread action.  Call this
913  * immediately before switching from user- to system-context to pass
914  * data between them.
915  * @param act The ModelAction created by the user-thread action
916  */
917 void ModelChecker::set_current_action(ModelAction *act) {
918         priv->current_action = act;
919 }
920
921 /**
922  * This is the heart of the model checker routine. It performs model-checking
923  * actions corresponding to a given "current action." Among other processes, it
924  * calculates reads-from relationships, updates synchronization clock vectors,
925  * forms a memory_order constraints graph, and handles replay/backtrack
926  * execution when running permutations of previously-observed executions.
927  *
928  * @param curr The current action to process
929  * @return The next Thread that must be executed. May be NULL if ModelChecker
930  * makes no choice (e.g., according to replay execution, combining RMW actions,
931  * etc.)
932  */
933 Thread * ModelChecker::check_current_action(ModelAction *curr)
934 {
935         ASSERT(curr);
936         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
937
938         if (!check_action_enabled(curr)) {
939                 /* Make the execution look like we chose to run this action
940                  * much later, when a lock/join can succeed */
941                 get_current_thread()->set_pending(curr);
942                 scheduler->sleep(get_current_thread());
943                 return get_next_thread(NULL);
944         }
945
946         bool newly_explored = initialize_curr_action(&curr);
947
948         wake_up_sleeping_actions(curr);
949
950         /* Add the action to lists before any other model-checking tasks */
951         if (!second_part_of_rmw)
952                 add_action_to_lists(curr);
953
954         /* Build may_read_from set for newly-created actions */
955         if (newly_explored && curr->is_read())
956                 build_reads_from_past(curr);
957
958         /* Initialize work_queue with the "current action" work */
959         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
960         while (!work_queue.empty()) {
961                 WorkQueueEntry work = work_queue.front();
962                 work_queue.pop_front();
963
964                 switch (work.type) {
965                 case WORK_CHECK_CURR_ACTION: {
966                         ModelAction *act = work.action;
967                         bool update = false; /* update this location's release seq's */
968                         bool update_all = false; /* update all release seq's */
969
970                         if (process_thread_action(curr))
971                                 update_all = true;
972
973                         if (act->is_read() && process_read(act, second_part_of_rmw))
974                                 update = true;
975
976                         if (act->is_write() && process_write(act))
977                                 update = true;
978
979                         if (act->is_mutex_op() && process_mutex(act))
980                                 update_all = true;
981
982                         if (act->is_relseq_fixup())
983                                 process_relseq_fixup(curr, &work_queue);
984
985                         if (update_all)
986                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
987                         else if (update)
988                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
989                         break;
990                 }
991                 case WORK_CHECK_RELEASE_SEQ:
992                         resolve_release_sequences(work.location, &work_queue);
993                         break;
994                 case WORK_CHECK_MO_EDGES: {
995                         /** @todo Complete verification of work_queue */
996                         ModelAction *act = work.action;
997                         bool updated = false;
998
999                         if (act->is_read()) {
1000                                 const ModelAction *rf = act->get_reads_from();
1001                                 if (rf != NULL && r_modification_order(act, rf))
1002                                         updated = true;
1003                         }
1004                         if (act->is_write()) {
1005                                 if (w_modification_order(act))
1006                                         updated = true;
1007                         }
1008                         mo_graph->commitChanges();
1009
1010                         if (updated)
1011                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1012                         break;
1013                 }
1014                 default:
1015                         ASSERT(false);
1016                         break;
1017                 }
1018         }
1019
1020         check_curr_backtracking(curr);
1021         set_backtracking(curr);
1022         return get_next_thread(curr);
1023 }
1024
1025 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1026         Node *currnode = curr->get_node();
1027         Node *parnode = currnode->get_parent();
1028
1029         if ((!parnode->backtrack_empty() ||
1030                          !currnode->misc_empty() ||
1031                          !currnode->read_from_empty() ||
1032                          !currnode->future_value_empty() ||
1033                          !currnode->promise_empty() ||
1034                          !currnode->relseq_break_empty())
1035                         && (!priv->next_backtrack ||
1036                                         *curr > *priv->next_backtrack)) {
1037                 priv->next_backtrack = curr;
1038         }
1039 }
1040
1041 bool ModelChecker::promises_expired() {
1042         for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1043                 Promise *promise = (*promises)[promise_index];
1044                 if (promise->get_expiration()<priv->used_sequence_numbers) {
1045                         return true;
1046                 }
1047         }
1048         return false;
1049 }
1050
1051 /** @return whether the current partial trace must be a prefix of a
1052  * feasible trace. */
1053 bool ModelChecker::isfeasibleprefix() {
1054         return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1055 }
1056
1057 /** @return whether the current partial trace is feasible. */
1058 bool ModelChecker::isfeasible() {
1059         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1060                 DEBUG("Infeasible: RMW violation\n");
1061
1062         return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1063 }
1064
1065 /** @return whether the current partial trace is feasible other than
1066  * multiple RMWs reading from the same store. */
1067 bool ModelChecker::isfeasibleotherthanRMW() {
1068         if (DBG_ENABLED()) {
1069                 if (mo_graph->checkForCycles())
1070                         DEBUG("Infeasible: modification order cycles\n");
1071                 if (failed_promise)
1072                         DEBUG("Infeasible: failed promise\n");
1073                 if (too_many_reads)
1074                         DEBUG("Infeasible: too many reads\n");
1075                 if (bad_synchronization)
1076                         DEBUG("Infeasible: bad synchronization ordering\n");
1077                 if (promises_expired())
1078                         DEBUG("Infeasible: promises expired\n");
1079         }
1080         return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1081 }
1082
1083 /** Returns whether the current completed trace is feasible. */
1084 bool ModelChecker::isfinalfeasible() {
1085         if (DBG_ENABLED() && promises->size() != 0)
1086                 DEBUG("Infeasible: unresolved promises\n");
1087
1088         return isfeasible() && promises->size() == 0;
1089 }
1090
1091 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1092 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1093         ModelAction *lastread = get_last_action(act->get_tid());
1094         lastread->process_rmw(act);
1095         if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1096                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1097                 mo_graph->commitChanges();
1098         }
1099         return lastread;
1100 }
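/*
 * Sketch of the intended flow (added comment; terminology follows the
 * comment above): a read-modify-write is modeled as an RMWR (the read half)
 * followed by an RMW on success or an RMWC on failure.  process_rmw() folds
 * that second action back into the earlier RMWR and, for a successful RMW
 * with a known reads-from, records the atomicity constraint through
 * mo_graph->addRMWEdge().
 */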
1101
1102 /**
1103  * Checks whether a thread has read from the same write too many times
1104  * without seeing the effects of a later write.
1105  *
1106  * Basic idea:
1107  * 1) there must be a different write that we could read from that would satisfy the modification order,
1108  * 2) we must have read from the same value in excess of maxreads times, and
1109  * 3) that other write must have been in the reads_from set for each of those maxreads reads.
1110  *
1111  * If so, we decide that the execution is no longer feasible.
1112  */
1113 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
1114         if (params.maxreads != 0) {
1115
1116                 if (curr->get_node()->get_read_from_size() <= 1)
1117                         return;
1118                 //Must make sure that execution is currently feasible...  We could
1119                 //accidentally clear by rolling back
1120                 if (!isfeasible())
1121                         return;
1122                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1123                 int tid = id_to_int(curr->get_tid());
1124
1125                 /* Skip checks if this thread has no actions at this location */
1126                 if ((int)thrd_lists->size() <= tid)
1127                         return;
1128                 action_list_t *list = &(*thrd_lists)[tid];
1129
1130                 action_list_t::reverse_iterator rit = list->rbegin();
1131                 /* Skip past curr */
1132                 for (; (*rit) != curr; rit++)
1133                         ;
1134                 /* go past curr now */
1135                 rit++;
1136
1137                 action_list_t::reverse_iterator ritcopy = rit;
1138                 //See if we have enough reads from the same value
1139                 int count = 0;
1140                 for (; count < params.maxreads; rit++,count++) {
1141                         if (rit==list->rend())
1142                                 return;
1143                         ModelAction *act = *rit;
1144                         if (!act->is_read())
1145                                 return;
1146
1147                         if (act->get_reads_from() != rf)
1148                                 return;
1149                         if (act->get_node()->get_read_from_size() <= 1)
1150                                 return;
1151                 }
1152                 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1153                         //Get write
1154                         const ModelAction * write = curr->get_node()->get_read_from_at(i);
1155
1156                         //Need a different write
1157                         if (write==rf)
1158                                 continue;
1159
1160                         /* Test to see whether this is a feasible write to read from */
1161                         mo_graph->startChanges();
1162                         r_modification_order(curr, write);
1163                         bool feasiblereadfrom = isfeasible();
1164                         mo_graph->rollbackChanges();
1165
1166                         if (!feasiblereadfrom)
1167                                 continue;
1168                         rit = ritcopy;
1169
1170                         bool feasiblewrite = true;
1171                         //now we need to see if this write works for everyone
1172
1173                         for (int loop = count; loop>0; loop--,rit++) {
1174                                 ModelAction *act=*rit;
1175                                 bool foundvalue = false;
1176                                 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1177                                         if (act->get_node()->get_read_from_at(j)==write) {
1178                                                 foundvalue = true;
1179                                                 break;
1180                                         }
1181                                 }
1182                                 if (!foundvalue) {
1183                                         feasiblewrite = false;
1184                                         break;
1185                                 }
1186                         }
1187                         if (feasiblewrite) {
1188                                 too_many_reads = true;
1189                                 return;
1190                         }
1191                 }
1192         }
1193 }
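/*
 * Illustrative scenario (added comment, assuming params.maxreads == 2): a
 * thread spinning on
 *
 *   while (!flag.load(std::memory_order_acquire)) {}
 *
 * keeps reading the initial store of 'flag' even though a feasible later
 * store (flag = true) appears in each read's may-read-from set.  After
 * maxreads such reads, check_recency() sets too_many_reads, pruning this
 * unbounded spinning execution as infeasible.
 */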
1194
1195 /**
1196  * Updates the mo_graph with the constraints imposed from the current
1197  * read.
1198  *
1199  * Basic idea is the following: Go through each other thread and find
1200  * the latest action that happened before our read.  Two cases:
1201  *
1202  * (1) The action is a write => that write must either occur before
1203  * the write we read from or be the write we read from.
1204  *
1205  * (2) The action is a read => the write that that action read from
1206  * must occur before the write we read from or be the same write.
1207  *
1208  * @param curr The current action. Must be a read.
1209  * @param rf The action that curr reads from. Must be a write.
1210  * @return True if modification order edges were added; false otherwise
1211  */
1212 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1213 {
1214         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1215         unsigned int i;
1216         bool added = false;
1217         ASSERT(curr->is_read());
1218
1219         /* Iterate over all threads */
1220         for (i = 0; i < thrd_lists->size(); i++) {
1221                 /* Iterate over actions in thread, starting from most recent */
1222                 action_list_t *list = &(*thrd_lists)[i];
1223                 action_list_t::reverse_iterator rit;
1224                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1225                         ModelAction *act = *rit;
1226
1227                         /*
1228                          * Include at most one act per-thread that "happens
1229                          * before" curr; don't consider curr itself.
1230                          */
1231                         if (act->happens_before(curr) && act != curr) {
1232                                 if (act->is_write()) {
1233                                         if (rf != act) {
1234                                                 mo_graph->addEdge(act, rf);
1235                                                 added = true;
1236                                         }
1237                                 } else {
1238                                         const ModelAction *prevreadfrom = act->get_reads_from();
1239                                         //if the previous read is unresolved, keep going...
1240                                         if (prevreadfrom == NULL)
1241                                                 continue;
1242
1243                                         if (rf != prevreadfrom) {
1244                                                 mo_graph->addEdge(prevreadfrom, rf);
1245                                                 added = true;
1246                                         }
1247                                 }
1248                                 break;
1249                         }
1250                 }
1251         }
1252
1253         return added;
1254 }
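/*
 * Worked example (added comment): suppose curr is a read R of location x
 * that reads from write W2.  For each thread, take the latest action on x
 * that happens before R:
 *
 *   case (1) it is a write W1            =>  add edge W1 --mo--> W2
 *   case (2) it is a read that read W0   =>  add edge W0 --mo--> W2
 *
 * No edge is added when W1 (or W0) is already W2 itself.
 */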
1255
1256 /** This method fixes up the modification order when we resolve a
1257  *  promise.  The basic problem is that actions that occur after the
1258  *  read curr could not properly add items to the modification order
1259  *  for our read.
1260  *
1261  *  So for each thread, we find the earliest item that happens after
1262  *  the read curr.  This is the item we have to fix up with additional
1263  *  constraints.  If that action is a write, we add a MO edge between
1264  *  the Action rf and that action.  If the action is a read, we add a
1265  *  MO edge between the Action rf, and whatever the read accessed.
1266  *
1267  * @param curr is the read ModelAction that we are fixing up MO edges for.
1268  * @param rf is the write ModelAction that curr reads from.
1269  *
1270  */
1271 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1272 {
1273         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1274         unsigned int i;
1275         ASSERT(curr->is_read());
1276
1277         /* Iterate over all threads */
1278         for (i = 0; i < thrd_lists->size(); i++) {
1279                 /* Iterate over actions in thread, starting from most recent */
1280                 action_list_t *list = &(*thrd_lists)[i];
1281                 action_list_t::reverse_iterator rit;
1282                 ModelAction *lastact = NULL;
1283
1284                 /* Find the earliest action that happens after curr (or curr itself, if curr is an RMW) */
1285                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1286                         ModelAction *act = *rit;
1287                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1288                                 lastact = act;
1289                         } else
1290                                 break;
1291                 }
1292
1293                         /* Add the modification order constraint for the action we found, if any */
1294                 if (lastact != NULL) {
1295                         if (lastact==curr) {
1296                                 //Case 1: The resolved read is a RMW, and we need to make sure
1297                                 //that the write portion of the RMW is mod ordered after rf
1298
1299                                 mo_graph->addEdge(rf, lastact);
1300                         } else if (lastact->is_read()) {
1301                                 //Case 2: The resolved read is a normal read and the next
1302                                 //operation is a read, and we need to make sure the value read
1303                                 //is mod ordered after rf
1304
1305                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1306                                 if (postreadfrom != NULL && rf != postreadfrom)
1307                                         mo_graph->addEdge(rf, postreadfrom);
1308                         } else {
1309                                 //Case 3: The resolved read is a normal read and the next
1310                                 //operation is a write, and we need to make sure that the
1311                                 //write is mod ordered after rf
1312                                 if (lastact!=rf)
1313                                         mo_graph->addEdge(rf, lastact);
1314                         }
1315                         break;
1316                 }
1317         }
1318 }
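/* Illustrative sketch (hypothetical client code; R, A and W are labels used
 * only for this example):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);    // R
 *   x.store(5, std::memory_order_relaxed);        // A
 *   // Thread 2:
 *   x.store(1, std::memory_order_relaxed);        // W
 *
 * If R speculatively returns the future value 1 (a Promise), A is explored
 * while R's reads-from is still unknown, so no constraint involving A could
 * be added at that time.  When W later fulfills the promise, this method
 * walks forward from R and adds the deferred edge W --mo--> A.
 */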
1319
1320 /**
1321  * Updates the mo_graph with the constraints imposed from the current write.
1322  *
1323  * Basic idea is the following: Go through each other thread and find
1324  * the latest action that happened before our write.  Two cases:
1325  *
1326  * (1) The action is a write => that write must occur before
1327  * the current write
1328  *
1329  * (2) The action is a read => the write that that action read from
1330  * must occur before the current write.
1331  *
1332  * This method also handles two other issues:
1333  *
1334  * (I) Sequential Consistency: Making sure that if the current write is
1335  * seq_cst, that it occurs after the previous seq_cst write.
1336  *
1337  * (II) Sending the write back to non-synchronizing reads.
1338  *
1339  * @param curr The current action. Must be a write.
1340  * @return True if modification order edges were added; false otherwise
1341  */
1342 bool ModelChecker::w_modification_order(ModelAction *curr)
1343 {
1344         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1345         unsigned int i;
1346         bool added = false;
1347         ASSERT(curr->is_write());
1348
1349         if (curr->is_seqcst()) {
1350                 /* A seq_cst write must be modification-ordered after the
1351                    last seq_cst write to this location. */
1352                 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1353                 if (last_seq_cst != NULL) {
1354                         mo_graph->addEdge(last_seq_cst, curr);
1355                         added = true;
1356                 }
1357         }
1358
1359         /* Iterate over all threads */
1360         for (i = 0; i < thrd_lists->size(); i++) {
1361                 /* Iterate over actions in thread, starting from most recent */
1362                 action_list_t *list = &(*thrd_lists)[i];
1363                 action_list_t::reverse_iterator rit;
1364                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1365                         ModelAction *act = *rit;
1366                         if (act == curr) {
1367                                 /*
1368                                  * 1) If RMW and it actually read from something, then we
1369                                  * already have all relevant edges, so just skip to next
1370                                  * thread.
1371                                  *
1372                                  * 2) If RMW and it didn't read from anything, we should add
1373                                  * whatever edge we can get to speed up convergence.
1374                                  *
1375                                  * 3) If normal write, we need to look at earlier actions, so
1376                                  * continue processing list.
1377                                  */
1378                                 if (curr->is_rmw()) {
1379                                         if (curr->get_reads_from()!=NULL)
1380                                                 break;
1381                                         else
1382                                                 continue;
1383                                 } else
1384                                         continue;
1385                         }
1386
1387                         /*
1388                          * Include at most one act per-thread that "happens
1389                          * before" curr
1390                          */
1391                         if (act->happens_before(curr)) {
1392                                 /*
1393                                  * Note: if act is RMW, just add edge:
1394                                  *   act --mo--> curr
1395                                  * The following edge should be handled elsewhere:
1396                                  *   readfrom(act) --mo--> act
1397                                  */
1398                                 if (act->is_write())
1399                                         mo_graph->addEdge(act, curr);
1400                                 else if (act->is_read()) {
1401                                         //if the previous read's reads-from is still unresolved, just keep going
1402                                         if (act->get_reads_from() == NULL)
1403                                                 continue;
1404                                         mo_graph->addEdge(act->get_reads_from(), curr);
1405                                 }
1406                                 added = true;
1407                                 break;
1408                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1409                                                      !act->same_thread(curr)) {
1410                                 /* We have an action that:
1411                                    (1) did not happen before us
1412                                    (2) is a read and we are a write
1413                                    (3) cannot synchronize with us
1414                                    (4) is in a different thread
1415                                    =>
1416                                    that read could potentially read from our write.  Note that
1417                                    these checks are overly conservative at this point; we'll
1418                                    do more checks before actually removing the
1419                                    pendingfuturevalue.
1420
1421                                  */
1422                                 if (thin_air_constraint_may_allow(curr, act)) {
1423                                         if (isfeasible() ||
1424                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
1425                                                 struct PendingFutureValue pfv = {curr,act};
1426                                                 futurevalues->push_back(pfv);
1427                                         }
1428                                 }
1429                         }
1430                 }
1431         }
1432
1433         return added;
1434 }
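/* Illustrative sketch of case (2) (hypothetical client code; W1, R and W2 are
 * labels used only for this example):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);        // W1
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);    // R, reads from W1
 *   x.store(2, std::memory_order_relaxed);        // W2 (curr)
 *
 * R happens before W2 (sequenced before) and reads from W1, so case (2)
 * adds W1 --mo--> W2 even though W1 itself is not ordered before W2 by
 * happens-before.
 */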
1435
1436 /** Arbitrary reads from the future are not allowed.  Section 29.3
1437  * part 9 places some constraints.  This method checks one such
1438  * constraint.  Others require compiler support. */
1439 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1440         if (!writer->is_rmw())
1441                 return true;
1442
1443         if (!reader->is_rmw())
1444                 return true;
1445
1446         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1447                 if (search == reader)
1448                         return false;
1449                 if (search->get_tid() == reader->get_tid() &&
1450                                 search->happens_before(reader))
1451                         break;
1452         }
1453
1454         return true;
1455 }
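/* Illustrative sketch (hypothetical client code; A and B are labels used only
 * for this example):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r1 = x.fetch_add(1, std::memory_order_relaxed);   // A
 *   // Thread 2:
 *   int r2 = x.fetch_add(1, std::memory_order_relaxed);   // B
 *
 * Sending A's value to B as a future value would mean B reads from A.  If A
 * already reads (transitively) from B, walking A's reads-from chain reaches
 * B, so thin_air_constraint_may_allow(A, B) returns false and the cyclic
 * pair of RMWs reading from each other is never constructed.
 */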
1456
1457 /**
1458  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1459  * some constraints. This method checks the following constraint (others
1460  * require compiler support):
1461  *
1462  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1463  */
1464 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1465 {
1466         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1467         unsigned int i;
1468         /* Iterate over all threads */
1469         for (i = 0; i < thrd_lists->size(); i++) {
1470                 const ModelAction *write_after_read = NULL;
1471
1472                 /* Iterate over actions in thread, starting from most recent */
1473                 action_list_t *list = &(*thrd_lists)[i];
1474                 action_list_t::reverse_iterator rit;
1475                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1476                         ModelAction *act = *rit;
1477
1478                         if (!reader->happens_before(act))
1479                                 break;
1480                         else if (act->is_write())
1481                                 write_after_read = act;
1482                         else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1483                                 write_after_read = act->get_reads_from();
1484                         }
1485                 }
1486
1487                 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1488                         return false;
1489         }
1490         return true;
1491 }
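/* Illustrative sketch (hypothetical client code; X, Y and Z are labels used
 * only for this example):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r1 = x.load(std::memory_order_relaxed);           // X (reader)
 *   x.store(1, std::memory_order_relaxed);                // Y
 *   // Thread 2:
 *   int r2 = x.fetch_add(1, std::memory_order_relaxed);   // Z (writer)
 *
 * If Z reads from Y, then Y --mo--> Z.  Since X --hb--> Y (sequenced
 * before), X must not read Z's value; once the Y --mo--> Z edge is in the
 * graph, mo_may_allow(Z, X) returns false.
 */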
1492
1493 /**
1494  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1495  * The ModelAction under consideration is expected to be taking part in
1496  * release/acquire synchronization as an object of the "reads from" relation.
1497  * Note that this can only provide release sequence support for RMW chains
1498  * which do not read from the future, as those actions cannot be traced until
1499  * their "promise" is fulfilled. Similarly, we may not even establish the
1500  * presence of a release sequence with certainty, as some modification order
1501  * constraints may be decided further in the future. Thus, this function
1502  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1503  * and a boolean representing certainty.
1504  *
1505  * @param rf The action that might be part of a release sequence. Must be a
1506  * write.
1507  * @param release_heads A pass-by-reference style return parameter. After
1508  * execution of this function, release_heads will contain the heads of all the
1509  * relevant release sequences, if any exist with certainty.
1510  * @param pending A pass-by-reference style return parameter which is only used
1511  * when returning false (i.e., uncertain). It records what is known about
1512  * the uncertain release sequence, including any write operations that might
1513  * break the sequence.
1514  * @return true, if the ModelChecker is certain that release_heads is complete;
1515  * false otherwise
1516  */
1517 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1518                 rel_heads_list_t *release_heads,
1519                 struct release_seq *pending) const
1520 {
1521         /* Only check for release sequences if there are no cycles */
1522         if (mo_graph->checkForCycles())
1523                 return false;
1524
1525         while (rf) {
1526                 ASSERT(rf->is_write());
1527
1528                 if (rf->is_release())
1529                         release_heads->push_back(rf);
1530                 if (!rf->is_rmw())
1531                         break; /* End of RMW chain */
1532
1533                 /** @todo Need to be smarter here...  In the linux lock
1534                  * example, this will run to the beginning of the program for
1535                  * every acquire. */
1536                 /** @todo The way to be smarter here is to keep going until 1
1537                  * thread has a release preceded by an acquire and you've seen
1538                  *       both. */
1539
1540                 /* acq_rel RMW is a sufficient stopping condition */
1541                 if (rf->is_acquire() && rf->is_release())
1542                         return true; /* complete */
1543
1544                 rf = rf->get_reads_from();
1545         }
1546         if (!rf) {
1547                 /* read from future: need to settle this later */
1548                 pending->rf = NULL;
1549                 return false; /* incomplete */
1550         }
1551
1552         if (rf->is_release())
1553                 return true; /* complete */
1554
1555         /* else relaxed write; check modification order for contiguous subsequence
1556          * -> rf must be same thread as release */
1557         int tid = id_to_int(rf->get_tid());
1558         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1559         action_list_t *list = &(*thrd_lists)[tid];
1560         action_list_t::const_reverse_iterator rit;
1561
1562         /* Find rf in the thread list */
1563         rit = std::find(list->rbegin(), list->rend(), rf);
1564         ASSERT(rit != list->rend());
1565
1566         /* Find the last write/release */
1567         for (; rit != list->rend(); rit++)
1568                 if ((*rit)->is_release())
1569                         break;
1570         if (rit == list->rend()) {
1571                 /* No write-release in this thread */
1572                 return true; /* complete */
1573         }
1574         ModelAction *release = *rit;
1575
1576         ASSERT(rf->same_thread(release));
1577
1578         pending->writes.clear();
1579
1580         bool certain = true;
1581         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1582                 if (id_to_int(rf->get_tid()) == (int)i)
1583                         continue;
1584                 list = &(*thrd_lists)[i];
1585
1586                 /* Can we ensure no future writes from this thread may break
1587                  * the release seq? */
1588                 bool future_ordered = false;
1589
1590                 ModelAction *last = get_last_action(int_to_id(i));
1591                 Thread *th = get_thread(int_to_id(i));
1592                 if ((last && rf->happens_before(last)) ||
1593                                 !is_enabled(th) ||
1594                                 th->is_complete())
1595                         future_ordered = true;
1596
1597                 ASSERT(!th->is_model_thread() || future_ordered);
1598
1599                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1600                         const ModelAction *act = *rit;
1601                         /* Reached synchronization -> this thread is complete */
1602                         if (act->happens_before(release))
1603                                 break;
1604                         if (rf->happens_before(act)) {
1605                                 future_ordered = true;
1606                                 continue;
1607                         }
1608
1609                         /* Only non-RMW writes can break release sequences */
1610                         if (!act->is_write() || act->is_rmw())
1611                                 continue;
1612
1613                         /* Check modification order */
1614                         if (mo_graph->checkReachable(rf, act)) {
1615                                 /* rf --mo--> act */
1616                                 future_ordered = true;
1617                                 continue;
1618                         }
1619                         if (mo_graph->checkReachable(act, release))
1620                                 /* act --mo--> release */
1621                                 break;
1622                         if (mo_graph->checkReachable(release, act) &&
1623                                       mo_graph->checkReachable(act, rf)) {
1624                                 /* release --mo-> act --mo--> rf */
1625                                 return true; /* complete */
1626                         }
1627                         /* act may break release sequence */
1628                         pending->writes.push_back(act);
1629                         certain = false;
1630                 }
1631                 if (!future_ordered)
1632                         certain = false; /* This thread is uncertain */
1633         }
1634
1635         if (certain) {
1636                 release_heads->push_back(release);
1637                 pending->writes.clear();
1638         } else {
1639                 pending->release = release;
1640                 pending->rf = rf;
1641         }
1642         return certain;
1643 }
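/* Illustrative sketch (hypothetical client code; the comments assume the
 * C++11 release-sequence rules that this checker implements):
 *
 *   int data;
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   data = 42;                                     // non-atomic payload
 *   x.store(1, std::memory_order_release);         // candidate release head
 *   x.store(2, std::memory_order_relaxed);         // rf (same thread)
 *   // Thread 2:
 *   if (x.load(std::memory_order_acquire) == 2)    // reads from rf
 *           assert(data == 42);
 *
 * rf is not itself a release, but it can extend the release sequence headed
 * by the release store in the same thread, provided no write from another
 * thread is modification-ordered between the two.  This function reports the
 * release store as a head only once that contiguity is certain; otherwise it
 * records the potentially interfering writes in @a pending.
 */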
1644
1645 /**
1646  * A public interface for getting the release sequence head(s) with which a
1647  * given ModelAction must synchronize. This function only returns a non-empty
1648  * result when it can locate a release sequence head with certainty. Otherwise,
1649  * it may mark the internal state of the ModelChecker so that it will handle
1650  * the release sequence at a later time, causing @a act to update its
1651  * synchronization at some later point in execution.
1652  * @param act The 'acquire' action that may read from a release sequence
1653  * @param release_heads A pass-by-reference return parameter. Will be filled
1654  * with the head(s) of the release sequence(s), if they exist with certainty.
1655  * @see ModelChecker::release_seq_heads
1656  */
1657 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1658 {
1659         const ModelAction *rf = act->get_reads_from();
1660         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1661         sequence->acquire = act;
1662
1663         if (!release_seq_heads(rf, release_heads, sequence)) {
1664                 /* add act to 'lazy checking' list */
1665                 pending_rel_seqs->push_back(sequence);
1666         } else {
1667                 snapshot_free(sequence);
1668         }
1669 }
1670
1671 /**
1672  * Attempt to resolve all stashed operations that might synchronize with a
1673  * release sequence for a given location. This implements the "lazy" portion of
1674  * determining whether or not a release sequence was contiguous, since not all
1675  * modification order information is present at the time an action occurs.
1676  *
1677  * @param location The location/object that should be checked for release
1678  * sequence resolutions. A NULL value means to check all locations.
1679  * @param work_queue The work queue to which to add work items as they are
1680  * generated
1681  * @return True if any updates occurred (new synchronization, new mo_graph
1682  * edges)
1683  */
1684 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1685 {
1686         bool updated = false;
1687         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1688         while (it != pending_rel_seqs->end()) {
1689                 struct release_seq *pending = *it;
1690                 ModelAction *act = pending->acquire;
1691
1692                 /* Only resolve sequences on the given location, if provided */
1693                 if (location && act->get_location() != location) {
1694                         it++;
1695                         continue;
1696                 }
1697
1698                 const ModelAction *rf = act->get_reads_from();
1699                 rel_heads_list_t release_heads;
1700                 bool complete;
1701                 complete = release_seq_heads(rf, &release_heads, pending);
1702                 for (unsigned int i = 0; i < release_heads.size(); i++) {
1703                         if (!act->has_synchronized_with(release_heads[i])) {
1704                                 if (act->synchronize_with(release_heads[i]))
1705                                         updated = true;
1706                                 else
1707                                         set_bad_synchronization();
1708                         }
1709                 }
1710
1711                 if (updated) {
1712                         /* Re-check all pending release sequences */
1713                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1714                         /* Re-check act for mo_graph edges */
1715                         work_queue->push_back(MOEdgeWorkEntry(act));
1716
1717                         /* propagate synchronization to later actions */
1718                         action_list_t::reverse_iterator rit = action_trace->rbegin();
1719                         for (; (*rit) != act; rit++) {
1720                                 ModelAction *propagate = *rit;
1721                                 if (act->happens_before(propagate)) {
1722                                         propagate->synchronize_with(act);
1723                                         /* Re-check 'propagate' for mo_graph edges */
1724                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
1725                                 }
1726                         }
1727                 }
1728                 if (complete) {
1729                         it = pending_rel_seqs->erase(it);
1730                         snapshot_free(pending);
1731                 } else {
1732                         it++;
1733                 }
1734         }
1735
1736         // If we resolved any release sequences, check whether we have realized a data race.
1737         if (checkDataRaces()) {
1738                 set_assert();
1739         }
1740
1741         return updated;
1742 }
1743
1744 /**
1745  * Performs various bookkeeping operations for the current ModelAction. For
1746  * instance, adds action to the per-object, per-thread action vector and to the
1747  * action trace list of all thread actions.
1748  *
1749  * @param act is the ModelAction to add.
1750  */
1751 void ModelChecker::add_action_to_lists(ModelAction *act)
1752 {
1753         int tid = id_to_int(act->get_tid());
1754         action_trace->push_back(act);
1755
1756         get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
1757
1758         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1759         if (tid >= (int)vec->size())
1760                 vec->resize(priv->next_thread_id);
1761         (*vec)[tid].push_back(act);
1762
1763         if ((int)thrd_last_action->size() <= tid)
1764                 thrd_last_action->resize(get_num_threads());
1765         (*thrd_last_action)[tid] = act;
1766
1767         if (act->is_wait()) {
1768                 void *mutex_loc=(void *) act->get_value();
1769                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1770
1771                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1772                 if (tid >= (int)vec->size())
1773                         vec->resize(priv->next_thread_id);
1774                 (*vec)[tid].push_back(act);
1775
1776                 if ((int)thrd_last_action->size() <= tid)
1777                         thrd_last_action->resize(get_num_threads());
1778                 (*thrd_last_action)[tid] = act;
1779         }
1780 }
1781
1782 /**
1783  * @brief Get the last action performed by a particular Thread
1784  * @param tid The thread ID of the Thread in question
1785  * @return The last action in the thread
1786  */
1787 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1788 {
1789         int threadid = id_to_int(tid);
1790         if (threadid < (int)thrd_last_action->size())
1791                 return (*thrd_last_action)[threadid];
1792         else
1793                 return NULL;
1794 }
1795
1796 /**
1797  * Gets the last memory_order_seq_cst write (in the total global sequence)
1798  * performed on a particular object (i.e., memory location), not including the
1799  * current action.
1800  * @param curr The current ModelAction; also denotes the object location to
1801  * check
1802  * @return The last seq_cst write
1803  */
1804 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1805 {
1806         void *location = curr->get_location();
1807         action_list_t *list = get_safe_ptr_action(obj_map, location);
1808         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1809         action_list_t::reverse_iterator rit;
1810         for (rit = list->rbegin(); rit != list->rend(); rit++)
1811                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1812                         return *rit;
1813         return NULL;
1814 }
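/* Sketch of a hypothetical trace on location x (oldest first; W1-W3 are
 * labels used only for this example):
 *
 *   x.store(1, std::memory_order_seq_cst);   // W1
 *   x.store(2, std::memory_order_relaxed);   // W2 (skipped: not seq_cst)
 *   x.store(3, std::memory_order_seq_cst);   // W3
 *
 * For a later seq_cst access to x, get_last_seq_cst() scans backwards and
 * returns W3.
 */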
1815
1816 /**
1817  * Gets the last unlock operation performed on a particular mutex (i.e., memory
1818  * location). This function identifies the mutex according to the current
1819  * action, which is presumed to operate on the same mutex.
1820  * @param curr The current ModelAction; also denotes the object location to
1821  * check
1822  * @return The last unlock operation
1823  */
1824 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1825 {
1826         void *location = curr->get_location();
1827         action_list_t *list = get_safe_ptr_action(obj_map, location);
1828         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1829         action_list_t::reverse_iterator rit;
1830         for (rit = list->rbegin(); rit != list->rend(); rit++)
1831                 if ((*rit)->is_unlock() || (*rit)->is_wait())
1832                         return *rit;
1833         return NULL;
1834 }
1835
1836 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1837 {
1838         ModelAction *parent = get_last_action(tid);
1839         if (!parent)
1840                 parent = get_thread(tid)->get_creation();
1841         return parent;
1842 }
1843
1844 /**
1845  * Returns the clock vector for a given thread.
1846  * @param tid The thread whose clock vector we want
1847  * @return Desired clock vector
1848  */
1849 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1850 {
1851         return get_parent_action(tid)->get_cv();
1852 }
1853
1854 /**
1855  * Resolve a set of Promises with a current write. The set is provided in the
1856  * Node corresponding to @a write.
1857  * @param write The ModelAction that is fulfilling Promises
1858  * @return True if promises were resolved; false otherwise
1859  */
1860 bool ModelChecker::resolve_promises(ModelAction *write)
1861 {
1862         bool resolved = false;
1863         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
1864
1865         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1866                 Promise *promise = (*promises)[promise_index];
1867                 if (write->get_node()->get_promise(i)) {
1868                         ModelAction *read = promise->get_action();
1869                         if (read->is_rmw()) {
1870                                 mo_graph->addRMWEdge(write, read);
1871                         }
1872                         read->read_from(write);
1873                         //First fix up the modification order for actions that happened
1874                         //before the read
1875                         r_modification_order(read, write);
1876                         //Next fix up the modification order for actions that happened
1877                         //after the read.
1878                         post_r_modification_order(read, write);
1879                         //Make sure the promise's value matches the write's value
1880                         ASSERT(promise->get_value() == write->get_value());
1881                         delete(promise);
1882
1883                         promises->erase(promises->begin() + promise_index);
1884                         threads_to_check.push_back(read->get_tid());
1885
1886                         resolved = true;
1887                 } else
1888                         promise_index++;
1889         }
1890
1891         //Check whether reading these writes has made threads unable to
1892         //resolve promises
1893
1894         for (unsigned int i = 0; i < threads_to_check.size(); i++)
1895                 mo_check_promises(threads_to_check[i], write);
1896
1897         return resolved;
1898 }
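/* Illustrative sketch (hypothetical client code; R and W are labels used only
 * for this example):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1 (runs first in this execution):
 *   int r = x.load(std::memory_order_relaxed);    // R
 *   // Thread 2 (runs later):
 *   x.store(1, std::memory_order_relaxed);        // W
 *
 * If R is allowed to speculatively return 1 before any write of 1 exists,
 * that obligation is recorded as a Promise.  When W is finally executed with
 * value 1, this method binds R's reads-from to W, adds the deferred
 * modification order edges via r_modification_order() and
 * post_r_modification_order(), and asserts that the promised value matches.
 */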
1899
1900 /**
1901  * Compute the set of promises that could potentially be satisfied by this
1902  * action. Note that the set computation actually appears in the Node, not in
1903  * ModelChecker.
1904  * @param curr The ModelAction that may satisfy promises
1905  */
1906 void ModelChecker::compute_promises(ModelAction *curr)
1907 {
1908         for (unsigned int i = 0; i < promises->size(); i++) {
1909                 Promise *promise = (*promises)[i];
1910                 const ModelAction *act = promise->get_action();
1911                 if (!act->happens_before(curr) &&
1912                                 act->is_read() &&
1913                                 !act->could_synchronize_with(curr) &&
1914                                 !act->same_thread(curr) &&
1915                                 act->get_location() == curr->get_location() &&
1916                                 promise->get_value() == curr->get_value()) {
1917                         curr->get_node()->set_promise(i, act->is_rmw());
1918                 }
1919         }
1920 }
1921
1922 /** Checks promises in response to change in ClockVector Threads. */
1923 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
1924 {
1925         for (unsigned int i = 0; i < promises->size(); i++) {
1926                 Promise *promise = (*promises)[i];
1927                 const ModelAction *act = promise->get_action();
1928                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
1929                                 merge_cv->synchronized_since(act)) {
1930                         if (promise->increment_threads(tid)) {
1931                                 //Promise has failed
1932                                 failed_promise = true;
1933                                 return;
1934                         }
1935                 }
1936         }
1937 }
1938
1939 void ModelChecker::check_promises_thread_disabled() {
1940         for (unsigned int i = 0; i < promises->size(); i++) {
1941                 Promise *promise = (*promises)[i];
1942                 if (promise->check_promise()) {
1943                         failed_promise = true;
1944                         return;
1945                 }
1946         }
1947 }
1948
1949 /** Checks promises in response to addition to modification order for threads.
1950  * Definitions:
1951  * pthread is the thread that performed the read that created the promise
1952  *
1953  * pread is the read that created the promise
1954  *
1955  * pwrite is either the first write to the same location as pread by
1956  * pthread that is sequenced after pread, or the value read by the
1957  * first read to the same location as pread by pthread that is
1958  * sequenced after pread.
1959  *
1960  *      1. If tid=pthread, then we check what other threads are reachable
1961  * through the modification order starting with pwrite.  Those threads cannot
1962  * perform a write that will resolve the promise due to modification
1963  * order constraints.
1964  *
1965  * 2. If the tid is not pthread, we check whether pwrite can reach the
1966  * action write through the modification order.  If so, that thread
1967  * cannot perform a future write that will resolve the promise due to
1968  * modification order constraints.
1969  *
1970  *      @param tid The thread that either read from the ModelAction
1971  *      write, or actually performed the ModelAction write.
1972  *
1973  *      @param write The ModelAction representing the relevant write.
1974  */
1975
1976 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
1977         void * location = write->get_location();
1978         for (unsigned int i = 0; i < promises->size(); i++) {
1979                 Promise *promise = (*promises)[i];
1980                 const ModelAction *act = promise->get_action();
1981
1982                 //Is this promise on the same location?
1983                 if ( act->get_location() != location )
1984                         continue;
1985
1986                 //same thread as the promise
1987                 if ( act->get_tid()==tid ) {
1988
1989                         //do we have a pwrite for the promise, if not, set it
1990                         if (promise->get_write() == NULL ) {
1991                                 promise->set_write(write);
1992                                 //The pwrite cannot happen before the promise
1993                                 if (write->happens_before(act) && (write != act)) {
1994                                         failed_promise = true;
1995                                         return;
1996                                 }
1997                         }
1998                         if (mo_graph->checkPromise(write, promise)) {
1999                                 failed_promise = true;
2000                                 return;
2001                         }
2002                 }
2003
2004                 //Don't do any lookups twice for the same thread
2005                 if (promise->has_sync_thread(tid))
2006                         continue;
2007
2008                 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2009                         if (promise->increment_threads(tid)) {
2010                                 failed_promise = true;
2011                                 return;
2012                         }
2013                 }
2014         }
2015 }
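/* Illustrative sketch (hypothetical client code; pread, pwrite and W2 are
 * labels used only for this example):
 *
 *   std::atomic<int> x(0);
 *   // pthread (Thread 1):
 *   int r = x.load(std::memory_order_relaxed);    // pread, promised value 3
 *   x.store(1, std::memory_order_relaxed);        // pwrite
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);        // W2
 *
 * Any write fulfilling the promise must be modification-ordered before
 * pwrite, since pread reads from it and is sequenced before pwrite.  Once
 * pwrite --mo--> W2 is established, every later write by Thread 2 is also
 * mo-ordered after pwrite, so Thread 2 is ruled out via increment_threads();
 * if every other thread has been ruled out, the promise fails.
 */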
2016
2017 /**
2018  * Compute the set of writes that may break the current pending release
2019  * sequence. This information is extracted from previous release sequence
2020  * calculations.
2021  *
2022  * @param curr The current ModelAction. Must be a release sequence fixup
2023  * action.
2024  */
2025 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2026 {
2027         if (pending_rel_seqs->empty())
2028                 return;
2029
2030         struct release_seq *pending = pending_rel_seqs->back();
2031         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2032                 const ModelAction *write = pending->writes[i];
2033                 curr->get_node()->add_relseq_break(write);
2034         }
2035
2036         /* NULL means don't break the sequence; just synchronize */
2037         curr->get_node()->add_relseq_break(NULL);
2038 }
2039
2040 /**
2041  * Build up an initial set of all past writes that this 'read' action may read
2042  * from. This set is determined by the clock vector's "happens before"
2043  * relationship.
2044  * @param curr is the current ModelAction that we are exploring; it must be a
2045  * 'read' operation.
2046  */
2047 void ModelChecker::build_reads_from_past(ModelAction *curr)
2048 {
2049         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2050         unsigned int i;
2051         ASSERT(curr->is_read());
2052
2053         ModelAction *last_seq_cst = NULL;
2054
2055         /* Track whether this object has been initialized */
2056         bool initialized = false;
2057
2058         if (curr->is_seqcst()) {
2059                 last_seq_cst = get_last_seq_cst(curr);
2060                 /* We have to at least see the last sequentially consistent write,
2061                    so we are initialized. */
2062                 if (last_seq_cst != NULL)
2063                         initialized = true;
2064         }
2065
2066         /* Iterate over all threads */
2067         for (i = 0; i < thrd_lists->size(); i++) {
2068                 /* Iterate over actions in thread, starting from most recent */
2069                 action_list_t *list = &(*thrd_lists)[i];
2070                 action_list_t::reverse_iterator rit;
2071                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2072                         ModelAction *act = *rit;
2073
2074                         /* Only consider 'write' actions */
2075                         if (!act->is_write() || act == curr)
2076                                 continue;
2077
2078                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2079                         if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
2080                                 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2081                                         DEBUG("Adding action to may_read_from:\n");
2082                                         if (DBG_ENABLED()) {
2083                                                 act->print();
2084                                                 curr->print();
2085                                         }
2086                                         curr->get_node()->add_read_from(act);
2087                                 }
2088                         }
2089
2090                         /* Include at most one act per-thread that "happens before" curr */
2091                         if (act->happens_before(curr)) {
2092                                 initialized = true;
2093                                 break;
2094                         }
2095                 }
2096         }
2097
2098         if (!initialized) {
2099                 /** @todo Need a more informative way of reporting errors. */
2100                 printf("ERROR: may read from uninitialized atomic\n");
2101                 set_assert();
2102         }
2103
2104         if (DBG_ENABLED() || !initialized) {
2105                 printf("Reached read action:\n");
2106                 curr->print();
2107                 printf("Printing may_read_from\n");
2108                 curr->get_node()->print_may_read_from();
2109                 printf("End printing may_read_from\n");
2110         }
2111 }
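/* Illustrative sketch (hypothetical client code; W1, W2 and R are labels used
 * only for this example):
 *
 *   std::atomic<int> x(0), flag(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);        // W1
 *   x.store(2, std::memory_order_relaxed);        // W2
 *   flag.store(1, std::memory_order_release);
 *   // Thread 2:
 *   while (!flag.load(std::memory_order_acquire))
 *           ;
 *   int r = x.load(std::memory_order_relaxed);    // R (curr)
 *
 * Both W1 and W2 happen before R, but the backwards scan of Thread 1's list
 * adds only W2 to may_read_from and then breaks: W1 is hidden by the later
 * W2 and can never be read by R.  A concurrent write with no happens-before
 * ordering relative to R would additionally remain in the set.
 */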
2112
2113 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2114         while(true) {
2115                 Node *prevnode=write->get_node()->get_parent();
2116
2117                 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2118                 if (write->is_release() && thread_sleep)
2119                         return true;
2120                 if (!write->is_rmw()) {
2121                         return false;
2122                 }
2123                 if (write->get_reads_from()==NULL)
2124                         return true;
2125                 write=write->get_reads_from();
2126         }
2127 }
2128
2129 static void print_list(action_list_t *list)
2130 {
2131         action_list_t::iterator it;
2132
2133         printf("---------------------------------------------------------------------\n");
2134         printf("Trace:\n");
2135         unsigned int hash=0;
2136
2137         for (it = list->begin(); it != list->end(); it++) {
2138                 (*it)->print();
2139                 hash=hash^(hash<<3)^((*it)->hash());
2140         }
2141         printf("HASH %u\n", hash);
2142         printf("---------------------------------------------------------------------\n");
2143 }
2144
2145 #if SUPPORT_MOD_ORDER_DUMP
2146 void ModelChecker::dumpGraph(char *filename) {
2147         char buffer[200];
2148         sprintf(buffer, "%s.dot",filename);
2149         FILE *file=fopen(buffer, "w");
2150         fprintf(file, "digraph %s {\n",filename);
2151         mo_graph->dumpNodes(file);
2152         ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2153
2154         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2155                 ModelAction *action=*it;
2156                 if (action->is_read()) {
2157                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2158                         if (action->get_reads_from()!=NULL)
2159                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2160                 }
2161                 if (thread_array[action->get_tid()] != NULL) {
2162                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2163                 }
2164
2165                 thread_array[action->get_tid()]=action;
2166         }
2167         fprintf(file,"}\n");
2168         model_free(thread_array);
2169         fclose(file);
2170 }
2171 #endif
2172
2173 void ModelChecker::print_summary()
2174 {
2175         printf("\n");
2176         printf("Number of executions: %d\n", num_executions);
2177         printf("Number of feasible executions: %d\n", num_feasible_executions);
2178         printf("Total nodes created: %d\n", node_stack->get_total_nodes());
2179
2180 #if SUPPORT_MOD_ORDER_DUMP
2181         scheduler->print();
2182         char buffername[100];
2183         sprintf(buffername, "exec%04u", num_executions);
2184         mo_graph->dumpGraphToFile(buffername);
2185         sprintf(buffername, "graph%04u", num_executions);
2186         dumpGraph(buffername);
2187 #endif
2188
2189         if (!isfinalfeasible())
2190                 printf("INFEASIBLE EXECUTION!\n");
2191         print_list(action_trace);
2192         printf("\n");
2193 }
2194
2195 /**
2196  * Add a Thread to the system for the first time. Should only be called once
2197  * per thread.
2198  * @param t The Thread to add
2199  */
2200 void ModelChecker::add_thread(Thread *t)
2201 {
2202         thread_map->put(id_to_int(t->get_id()), t);
2203         scheduler->add_thread(t);
2204 }
2205
2206 /**
2207  * Removes a thread from the scheduler.
2208  * @param t The Thread to remove.
2209  */
2210 void ModelChecker::remove_thread(Thread *t)
2211 {
2212         scheduler->remove_thread(t);
2213 }
2214
2215 /**
2216  * @brief Get a Thread reference by its ID
2217  * @param tid The Thread's ID
2218  * @return A Thread reference
2219  */
2220 Thread * ModelChecker::get_thread(thread_id_t tid) const
2221 {
2222         return thread_map->get(id_to_int(tid));
2223 }
2224
2225 /**
2226  * @brief Get a reference to the Thread in which a ModelAction was executed
2227  * @param act The ModelAction
2228  * @return A Thread reference
2229  */
2230 Thread * ModelChecker::get_thread(ModelAction *act) const
2231 {
2232         return get_thread(act->get_tid());
2233 }
2234
2235 /**
2236  * @brief Check if a Thread is currently enabled
2237  * @param t The Thread to check
2238  * @return True if the Thread is currently enabled
2239  */
2240 bool ModelChecker::is_enabled(Thread *t) const
2241 {
2242         return scheduler->is_enabled(t);
2243 }
2244
2245 /**
2246  * @brief Check if a Thread is currently enabled
2247  * @param tid The ID of the Thread to check
2248  * @return True if the Thread is currently enabled
2249  */
2250 bool ModelChecker::is_enabled(thread_id_t tid) const
2251 {
2252         return scheduler->is_enabled(tid);
2253 }
2254
2255 /**
2256  * Switch from a user-context to the "master thread" context (a.k.a. system
2257  * context). This switch is made with the intention of exploring a particular
2258  * model-checking action (described by a ModelAction object). Must be called
2259  * from a user-thread context.
2260  *
2261  * @param act The current action that will be explored. May be NULL only if
2262  * trace is exiting via an assertion (see ModelChecker::set_assert and
2263  * ModelChecker::has_asserted).
2264  * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2265  */
2266 int ModelChecker::switch_to_master(ModelAction *act)
2267 {
2268         DBG();
2269         Thread *old = thread_current();
2270         set_current_action(act);
2271         old->set_state(THREAD_READY);
2272         return Thread::swap(old, &system_context);
2273 }
2274
2275 /**
2276  * Takes the next step in the execution, if possible.
2277  * @return Returns true (success) if a step was taken and false otherwise.
2278  */
2279 bool ModelChecker::take_step() {
2280         if (has_asserted())
2281                 return false;
2282
2283         Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2284         if (curr) {
2285                 if (curr->get_state() == THREAD_READY) {
2286                         ASSERT(priv->current_action);
2287
2288                         priv->nextThread = check_current_action(priv->current_action);
2289                         priv->current_action = NULL;
2290
2291                         if (curr->is_blocked() || curr->is_complete())
2292                                 scheduler->remove_thread(curr);
2293                 } else {
2294                         ASSERT(false);
2295                 }
2296         }
2297         Thread *next = scheduler->next_thread(priv->nextThread);
2298
2299         /* Infeasible -> don't take any more steps */
2300         if (!isfeasible())
2301                 return false;
2302
2303         if (params.bound != 0) {
2304                 if (priv->used_sequence_numbers > params.bound) {
2305                         return false;
2306                 }
2307         }
2308
2309         DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2310                         next ? id_to_int(next->get_id()) : -1);
2311
2312         /*
2313          * Launch end-of-execution release sequence fixups only when there are:
2314          *
2315          * (1) no more user threads to run (or when execution replay chooses
2316          *     the 'model_thread')
2317          * (2) pending release sequences
2318          * (3) pending assertions (i.e., data races)
2319          * (4) no pending promises
2320          */
2321         if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2322                         isfinalfeasible() && !unrealizedraces.empty()) {
2323                 printf("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2324                                 pending_rel_seqs->size());
2325                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2326                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2327                                 model_thread);
2328                 set_current_action(fixup);
2329                 return true;
2330         }
2331
2332         /* next == NULL -> don't take any more steps */
2333         if (!next)
2334                 return false;
2335
2336         next->set_state(THREAD_RUNNING);
2337
2338         if (next->get_pending() != NULL) {
2339                 /* restart a pending action */
2340                 set_current_action(next->get_pending());
2341                 next->set_pending(NULL);
2342                 next->set_state(THREAD_READY);
2343                 return true;
2344         }
2345
2346         /* Return false only if swap fails with an error */
2347         return (Thread::swap(&system_context, next) == 0);
2348 }
2349
2350 /** Runs the current execution until there are no more steps to take. */
2351 void ModelChecker::finish_execution() {
2352         DBG();
2353
2354         while (take_step());
2355 }