[c11tester.git] / model.cc
#include <stdio.h>
#include <algorithm>
#include <mutex>

#include "model.h"
#include "action.h"
#include "nodestack.h"
#include "schedule.h"
#include "snapshot-interface.h"
#include "common.h"
#include "clockvector.h"
#include "cyclegraph.h"
#include "promise.h"
#include "datarace.h"
#include "threads-model.h"
#include "output.h"

#define INITIAL_THREAD_ID       0

ModelChecker *model;

struct bug_message {
        bug_message(const char *str) {
                const char *fmt = "  [BUG] %s\n";
                msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
                sprintf(msg, fmt, str);
        }
        ~bug_message() { if (msg) snapshot_free(msg); }

        char *msg;
        void print() { model_print("%s", msg); }

        SNAPSHOTALLOC
};

/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
        model_snapshot_members() :
                current_action(NULL),
                /* First thread created will have id INITIAL_THREAD_ID */
                next_thread_id(INITIAL_THREAD_ID),
                used_sequence_numbers(0),
                nextThread(NULL),
                next_backtrack(NULL),
                bugs(),
                stats(),
                failed_promise(false),
                too_many_reads(false),
                bad_synchronization(false),
                asserted(false)
        { }

        ~model_snapshot_members() {
                for (unsigned int i = 0; i < bugs.size(); i++)
                        delete bugs[i];
                bugs.clear();
        }

        ModelAction *current_action;
        unsigned int next_thread_id;
        modelclock_t used_sequence_numbers;
        Thread *nextThread;
        ModelAction *next_backtrack;
        std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
        struct execution_stats stats;
        bool failed_promise;
        bool too_many_reads;
        /** @brief Incorrectly-ordered synchronization was made */
        bool bad_synchronization;
        bool asserted;

        SNAPSHOTALLOC
};

/** @brief Constructor */
ModelChecker::ModelChecker(struct model_params params) :
        /* Initialize default scheduler */
        params(params),
        scheduler(new Scheduler()),
        diverge(NULL),
        earliest_diverge(NULL),
        action_trace(new action_list_t()),
        thread_map(new HashTable<int, Thread *, int>()),
        obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
        lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
        condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
        obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
        promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
        futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
        pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
        thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
        thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
        node_stack(new NodeStack()),
        priv(new struct model_snapshot_members()),
        mo_graph(new CycleGraph())
{
        /* Initialize a model-checker thread, for special ModelActions */
        model_thread = new Thread(get_next_id());
        thread_map->put(id_to_int(model_thread->get_id()), model_thread);
}

/** @brief Destructor */
ModelChecker::~ModelChecker()
{
        for (unsigned int i = 0; i < get_num_threads(); i++)
                delete thread_map->get(i);
        delete thread_map;

        delete obj_thrd_map;
        delete obj_map;
        delete lock_waiters_map;
        delete condvar_waiters_map;
        delete action_trace;

        for (unsigned int i = 0; i < promises->size(); i++)
                delete (*promises)[i];
        delete promises;

        delete pending_rel_seqs;

        delete thrd_last_action;
        delete thrd_last_fence_release;
        delete node_stack;
        delete scheduler;
        delete mo_graph;
        delete priv;
}

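/**
 * @brief Get the action list for a given location, creating it if needed
 * @param hash The per-location HashTable to search
 * @param ptr The memory location acting as the key
 * @return The action list stored for ptr, allocating a new empty list first
 * if none existed
 */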
static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
        action_list_t * tmp=hash->get(ptr);
        if (tmp==NULL) {
                tmp=new action_list_t();
                hash->put(ptr, tmp);
        }
        return tmp;
}

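/**
 * @brief Get the per-thread action-list vector for a given location, creating
 * it if needed
 * @param hash The per-location HashTable to search
 * @param ptr The memory location acting as the key
 * @return The vector of per-thread action lists for ptr, allocating a new
 * empty vector first if none existed
 */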
static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
        std::vector<action_list_t> * tmp=hash->get(ptr);
        if (tmp==NULL) {
                tmp=new std::vector<action_list_t>();
                hash->put(ptr, tmp);
        }
        return tmp;
}

/**
 * Restores user program to initial state and resets all model-checker data
 * structures.
 */
void ModelChecker::reset_to_initial_state()
{
        DEBUG("+++ Resetting to initial state +++\n");
        node_stack->reset_execution();

        /* Print all model-checker output before rollback */
        fflush(model_out);

        snapshotObject->backTrackBeforeStep(0);
}

/** @return a thread ID for a new Thread */
thread_id_t ModelChecker::get_next_id()
{
        return priv->next_thread_id++;
}

/** @return the number of user threads created during this execution */
unsigned int ModelChecker::get_num_threads() const
{
        return priv->next_thread_id;
}

/** @return The currently executing Thread. */
Thread * ModelChecker::get_current_thread() const
{
        return scheduler->get_current_thread();
}

/** @return a sequence number for a new ModelAction */
modelclock_t ModelChecker::get_next_seq_num()
{
        return ++priv->used_sequence_numbers;
}

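/**
 * @brief Get the current Node from the NodeStack
 * @return The Node at the head of the NodeStack
 */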
Node * ModelChecker::get_curr_node() const
{
        return node_stack->get_head();
}

/**
 * @brief Choose the next thread to execute.
 *
 * This function chooses the next thread that should execute. It can force the
 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
 * The model-checker may have no preference regarding the next thread (i.e.,
 * when exploring a new execution ordering), in which case this will return
 * NULL.
 * @param curr The current ModelAction. This action might guide the choice of
 * next thread.
 * @return The next thread to run. If the model-checker has no preference, NULL.
 */
Thread * ModelChecker::get_next_thread(ModelAction *curr)
{
        thread_id_t tid;

        if (curr!=NULL) {
                /* Do not split atomic actions. */
                if (curr->is_rmwr())
                        return thread_current();
                /* The THREAD_CREATE action points to the created Thread */
                else if (curr->get_type() == THREAD_CREATE)
                        return (Thread *)curr->get_location();
        }

        /* Have we completed exploring the preselected path? */
        if (diverge == NULL)
                return NULL;

        /* Else, we are trying to replay an execution */
        ModelAction *next = node_stack->get_next()->get_action();

        if (next == diverge) {
                if (earliest_diverge == NULL || *diverge < *earliest_diverge)
                        earliest_diverge=diverge;

                Node *nextnode = next->get_node();
                Node *prevnode = nextnode->get_parent();
                scheduler->update_sleep_set(prevnode);

                /* Reached divergence point */
                if (nextnode->increment_misc()) {
                        /* The next node will try to satisfy a different misc_index value. */
                        tid = next->get_tid();
                        node_stack->pop_restofstack(2);
                } else if (nextnode->increment_promise()) {
                        /* The next node will try to satisfy a different set of promises. */
                        tid = next->get_tid();
                        node_stack->pop_restofstack(2);
                } else if (nextnode->increment_read_from()) {
                        /* The next node will read from a different value. */
                        tid = next->get_tid();
                        node_stack->pop_restofstack(2);
                } else if (nextnode->increment_future_value()) {
                        /* The next node will try to read from a different future value. */
                        tid = next->get_tid();
                        node_stack->pop_restofstack(2);
                } else if (nextnode->increment_relseq_break()) {
                        /* The next node will try to resolve a release sequence differently */
                        tid = next->get_tid();
                        node_stack->pop_restofstack(2);
                } else {
                        /* Make a different thread execute for next step */
                        scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
                        tid = prevnode->get_next_backtrack();
                        /* Make sure the backtracked thread isn't sleeping. */
                        node_stack->pop_restofstack(1);
                        if (diverge==earliest_diverge) {
                                earliest_diverge=prevnode->get_action();
                        }
                }
                /* The correct sleep set is in the parent node. */
                execute_sleep_set();

                DEBUG("*** Divergence point ***\n");

                diverge = NULL;
        } else {
                tid = next->get_tid();
        }
        DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
        ASSERT(tid != THREAD_ID_T_NONE);
        return thread_map->get(id_to_int(tid));
}

/**
 * We need to know what the next actions of all threads in the sleep
 * set will be. This method computes them and stores each one as the
 * corresponding Thread's pending action.
 */

void ModelChecker::execute_sleep_set() {
        for(unsigned int i=0;i<get_num_threads();i++) {
                thread_id_t tid=int_to_id(i);
                Thread *thr=get_thread(tid);
                if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
                                 thr->get_pending() == NULL ) {
                        thr->set_state(THREAD_RUNNING);
                        scheduler->next_thread(thr);
                        Thread::swap(&system_context, thr);
                        priv->current_action->set_sleep_flag();
                        thr->set_pending(priv->current_action);
                }
        }
        priv->current_action = NULL;
}

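/**
 * @brief Remove from the sleep set any thread whose pending action could
 * synchronize with the current action
 *
 * For each thread in the sleep set, if its pending action could synchronize
 * with curr (and curr is not the read half of an RMW), wake that thread up.
 * @param curr The current ModelAction
 */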
void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
        for(unsigned int i=0;i<get_num_threads();i++) {
                thread_id_t tid=int_to_id(i);
                Thread *thr=get_thread(tid);
                if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
                        ModelAction *pending_act=thr->get_pending();
                        if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
                                //Remove this thread from sleep set
                                scheduler->remove_sleep(thr);
                        }
                }
        }
}

/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelChecker::set_bad_synchronization()
{
        priv->bad_synchronization = true;
}

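/** @return True if the 'asserted' flag is set, indicating the current execution should halt */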
bool ModelChecker::has_asserted() const
{
        return priv->asserted;
}

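/** @brief Set the 'asserted' flag, requesting that the current execution halt */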
void ModelChecker::set_assert()
{
        priv->asserted = true;
}

/**
 * Check if we are in a deadlock. Should only be called at the end of an
 * execution, although it should not give false positives in the middle of an
 * execution (there should be some ENABLED thread).
 *
 * @return True if program is in a deadlock; false otherwise
 */
bool ModelChecker::is_deadlocked() const
{
        bool blocking_threads = false;
        for (unsigned int i = 0; i < get_num_threads(); i++) {
                thread_id_t tid = int_to_id(i);
                if (is_enabled(tid))
                        return false;
                Thread *t = get_thread(tid);
                if (!t->is_model_thread() && t->get_pending())
                        blocking_threads = true;
        }
        return blocking_threads;
}

/**
 * Check if this is a complete execution. That is, have all threads completed
 * execution (rather than exiting because sleep sets have forced a redundant
 * execution)?
 *
 * @return True if the execution is complete.
 */
bool ModelChecker::is_complete_execution() const
{
        for (unsigned int i = 0; i < get_num_threads(); i++)
                if (is_enabled(int_to_id(i)))
                        return false;
        return true;
}

/**
 * @brief Assert a bug in the executing program.
 *
 * Use this function to assert any sort of bug in the user program. If the
 * current trace is feasible (actually, a prefix of some feasible execution),
 * then this execution will be aborted, printing the appropriate message. If
 * the current trace is not yet feasible, the error message will be stashed and
 * printed if the execution ever becomes feasible.
 *
 * @param msg Descriptive message for the bug (do not include newline char)
 * @return True if bug is immediately-feasible
 */
bool ModelChecker::assert_bug(const char *msg)
{
        priv->bugs.push_back(new bug_message(msg));

        if (isfeasibleprefix()) {
                set_assert();
                return true;
        }
        return false;
}

/**
 * @brief Assert a bug in the executing program, asserted by a user thread
 * @see ModelChecker::assert_bug
 * @param msg Descriptive message for the bug (do not include newline char)
 */
void ModelChecker::assert_user_bug(const char *msg)
{
        /* If feasible bug, bail out now */
        if (assert_bug(msg))
                switch_to_master(NULL);
}

/** @return True, if any bugs have been reported for this execution */
bool ModelChecker::have_bug_reports() const
{
        return priv->bugs.size() != 0;
}

/** @brief Print bug report listing for this execution (if any bugs exist) */
void ModelChecker::print_bugs() const
{
        if (have_bug_reports()) {
                model_print("Bug report: %zu bug%s detected\n",
                                priv->bugs.size(),
                                priv->bugs.size() > 1 ? "s" : "");
                for (unsigned int i = 0; i < priv->bugs.size(); i++)
                        priv->bugs[i]->print();
        }
}

/**
 * @brief Record end-of-execution stats
 *
 * Must be run when exiting an execution. Records various stats.
 * @see struct execution_stats
 */
void ModelChecker::record_stats()
{
        stats.num_total++;
        if (!isfeasibleprefix())
                stats.num_infeasible++;
        else if (have_bug_reports())
                stats.num_buggy_executions++;
        else if (is_complete_execution())
                stats.num_complete++;
        else
                stats.num_redundant++;
}

/** @brief Print execution stats */
void ModelChecker::print_stats() const
{
        model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
        model_print("Number of redundant executions: %d\n", stats.num_redundant);
        model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
        model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
        model_print("Total executions: %d\n", stats.num_total);
        model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
}

/**
 * @brief End-of-execution print
 * @param printbugs Should any existing bugs be printed?
 */
void ModelChecker::print_execution(bool printbugs) const
{
        print_program_output();

        if (DBG_ENABLED() || params.verbose) {
                model_print("Earliest divergence point since last feasible execution:\n");
                if (earliest_diverge)
                        earliest_diverge->print();
                else
                        model_print("(Not set)\n");

                model_print("\n");
                print_stats();
        }

        /* Don't print invalid bugs */
        if (printbugs)
                print_bugs();

        model_print("\n");
        print_summary();
}

/**
 * Queries the model-checker for more executions to explore and, if one
 * exists, resets the model-checker state to execute a new execution.
 *
 * @return If there are more executions to explore, return true. Otherwise,
 * return false.
 */
bool ModelChecker::next_execution()
{
        DBG();
        /* Is this execution a feasible execution that's worth bug-checking? */
        bool complete = isfeasibleprefix() && (is_complete_execution() ||
                        have_bug_reports());

        /* End-of-execution bug checks */
        if (complete) {
                if (is_deadlocked())
                        assert_bug("Deadlock detected");

                checkDataRaces();
        }

        record_stats();

        /* Output */
        if (DBG_ENABLED() || params.verbose || have_bug_reports())
                print_execution(complete);
        else
                clear_program_output();

        if (complete)
                earliest_diverge = NULL;

        if ((diverge = get_next_backtrack()) == NULL)
                return false;

        if (DBG_ENABLED()) {
                model_print("Next execution will diverge at:\n");
                diverge->print();
        }

        reset_to_initial_state();
        return true;
}

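/**
 * @brief Find the most recent prior action that conflicts with the given action
 *
 * Searches the per-location action list, from most recent to oldest, for an
 * action that could be reordered against act (e.g., a potential
 * synchronization partner or a conflicting lock operation).
 *
 * @param act The ModelAction to check conflicts against
 * @return The most recent conflicting action, or NULL if there is none
 */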
ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
{
        switch (act->get_type()) {
        case ATOMIC_FENCE:
        case ATOMIC_READ:
        case ATOMIC_WRITE:
        case ATOMIC_RMW: {
                /* Optimization: relaxed operations don't need backtracking */
                if (act->is_relaxed())
                        return NULL;
                /* linear search: from most recent to oldest */
                action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
                action_list_t::reverse_iterator rit;
                for (rit = list->rbegin(); rit != list->rend(); rit++) {
                        ModelAction *prev = *rit;
                        if (prev->could_synchronize_with(act))
                                return prev;
                }
                break;
        }
        case ATOMIC_LOCK:
        case ATOMIC_TRYLOCK: {
                /* linear search: from most recent to oldest */
                action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
                action_list_t::reverse_iterator rit;
                for (rit = list->rbegin(); rit != list->rend(); rit++) {
                        ModelAction *prev = *rit;
                        if (act->is_conflicting_lock(prev))
                                return prev;
                }
                break;
        }
        case ATOMIC_UNLOCK: {
                /* linear search: from most recent to oldest */
                action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
                action_list_t::reverse_iterator rit;
                for (rit = list->rbegin(); rit != list->rend(); rit++) {
                        ModelAction *prev = *rit;
                        if (!act->same_thread(prev)&&prev->is_failed_trylock())
                                return prev;
                }
                break;
        }
        case ATOMIC_WAIT: {
                /* linear search: from most recent to oldest */
                action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
                action_list_t::reverse_iterator rit;
                for (rit = list->rbegin(); rit != list->rend(); rit++) {
                        ModelAction *prev = *rit;
                        if (!act->same_thread(prev)&&prev->is_failed_trylock())
                                return prev;
                        if (!act->same_thread(prev)&&prev->is_notify())
                                return prev;
                }
                break;
        }

        case ATOMIC_NOTIFY_ALL:
        case ATOMIC_NOTIFY_ONE: {
                /* linear search: from most recent to oldest */
                action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
                action_list_t::reverse_iterator rit;
                for (rit = list->rbegin(); rit != list->rend(); rit++) {
                        ModelAction *prev = *rit;
                        if (!act->same_thread(prev)&&prev->is_wait())
                                return prev;
                }
                break;
        }
        default:
                break;
        }
        return NULL;
}

/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against earlier conflicting actions.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelChecker::set_backtracking(ModelAction *act)
{
        Thread *t = get_thread(act);
        ModelAction * prev = get_last_conflict(act);
        if (prev == NULL)
                return;

        Node * node = prev->get_node()->get_parent();

        int low_tid, high_tid;
        if (node->is_enabled(t)) {
                low_tid = id_to_int(act->get_tid());
                high_tid = low_tid+1;
        } else {
                low_tid = 0;
                high_tid = get_num_threads();
        }

        for(int i = low_tid; i < high_tid; i++) {
                thread_id_t tid = int_to_id(i);

                /* Make sure this thread can be enabled here. */
                if (i >= node->get_num_threads())
                        break;

                /* Don't backtrack into a point where the thread is disabled or sleeping. */
                if (node->enabled_status(tid)!=THREAD_ENABLED)
                        continue;

                /* Check if this has been explored already */
                if (node->has_been_explored(tid))
                        continue;

                /* See if fairness allows */
                if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
                        bool unfair=false;
                        for(int t=0;t<node->get_num_threads();t++) {
                                thread_id_t tother=int_to_id(t);
                                if (node->is_enabled(tother) && node->has_priority(tother)) {
                                        unfair=true;
                                        break;
                                }
                        }
                        if (unfair)
                                continue;
                }
                /* Cache the latest backtracking point */
                if (!priv->next_backtrack || *prev > *priv->next_backtrack)
                        priv->next_backtrack = prev;

                /* If this is a new backtracking point, mark the tree */
                if (!node->set_backtrack(tid))
                        continue;
                DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
                                        id_to_int(prev->get_tid()),
                                        id_to_int(t->get_id()));
                if (DBG_ENABLED()) {
                        prev->print();
                        act->print();
                }
        }
}

/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelChecker::get_next_backtrack()
{
        ModelAction *next = priv->next_backtrack;
        priv->next_backtrack = NULL;
        return next;
}

/**
 * Processes a read or rmw model action.
 * @param curr is the read model action to process.
 * @param second_part_of_rmw is a boolean that is true if this is the second action of a rmw.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
{
        uint64_t value = VALUE_NONE;
        bool updated = false;
        while (true) {
                const ModelAction *reads_from = curr->get_node()->get_read_from();
                if (reads_from != NULL) {
                        mo_graph->startChanges();

                        value = reads_from->get_value();
                        bool r_status = false;

                        if (!second_part_of_rmw) {
                                check_recency(curr, reads_from);
                                r_status = r_modification_order(curr, reads_from);
                        }


                        if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
                                mo_graph->rollbackChanges();
                                priv->too_many_reads = false;
                                continue;
                        }

                        curr->read_from(reads_from);
                        mo_graph->commitChanges();
                        mo_check_promises(curr->get_tid(), reads_from);

                        updated |= r_status;
                } else if (!second_part_of_rmw) {
                        /* Read from future value */
                        value = curr->get_node()->get_future_value();
                        modelclock_t expiration = curr->get_node()->get_future_value_expiration();
                        curr->read_from(NULL);
                        Promise *valuepromise = new Promise(curr, value, expiration);
                        promises->push_back(valuepromise);
                }
                get_thread(curr)->set_return_value(value);
                return updated;
        }
}

/**
 * Processes a lock, trylock, unlock, wait, or notify model action.
 * @param curr is the mutex-related model action to process.
 *
 * The trylock operation checks whether the lock is taken. If not, it
 * falls through to the normal lock operation case. If so, it reports
 * failure.
 *
 * The lock operation has already been checked to be enabled, so it
 * just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelChecker::process_mutex(ModelAction *curr) {
        std::mutex *mutex=NULL;
        struct std::mutex_state *state=NULL;

        if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
                mutex = (std::mutex *)curr->get_location();
                state = mutex->get_state();
        } else if(curr->is_wait()) {
                mutex = (std::mutex *)curr->get_value();
                state = mutex->get_state();
        }

        switch (curr->get_type()) {
        case ATOMIC_TRYLOCK: {
                bool success = !state->islocked;
                curr->set_try_lock(success);
                if (!success) {
                        get_thread(curr)->set_return_value(0);
                        break;
                }
                get_thread(curr)->set_return_value(1);
        }
                //otherwise fall into the lock case
        case ATOMIC_LOCK: {
                if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
                        assert_bug("Lock access before initialization");
                state->islocked = true;
                ModelAction *unlock = get_last_unlock(curr);
                //synchronize with the previous unlock statement
                if (unlock != NULL) {
                        curr->synchronize_with(unlock);
                        return true;
                }
                break;
        }
        case ATOMIC_UNLOCK: {
                //unlock the lock
                state->islocked = false;
                //wake up the other threads
                action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
                //activate all the waiting threads
                for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
                        scheduler->wake(get_thread(*rit));
                }
                waiters->clear();
                break;
        }
        case ATOMIC_WAIT: {
                //unlock the lock
                state->islocked = false;
                //wake up the other threads
                action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
                //activate all the waiting threads
                for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
                        scheduler->wake(get_thread(*rit));
                }
                waiters->clear();
                //check whether we should go to sleep or not...simulate spurious failures
                if (curr->get_node()->get_misc()==0) {
                        get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
                        //disable us
                        scheduler->sleep(get_current_thread());
                }
                break;
        }
        case ATOMIC_NOTIFY_ALL: {
                action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
                //activate all the waiting threads
                for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
                        scheduler->wake(get_thread(*rit));
                }
                waiters->clear();
                break;
        }
        case ATOMIC_NOTIFY_ONE: {
                action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
                int wakeupthread=curr->get_node()->get_misc();
                action_list_t::iterator it = waiters->begin();
                advance(it, wakeupthread);
                scheduler->wake(get_thread(*it));
                waiters->erase(it);
                break;
        }

        default:
                ASSERT(0);
        }
        return false;
}

/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelChecker::process_write(ModelAction *curr)
{
        bool updated_mod_order = w_modification_order(curr);
        bool updated_promises = resolve_promises(curr);

        if (promises->size() == 0) {
                for (unsigned int i = 0; i < futurevalues->size(); i++) {
                        struct PendingFutureValue pfv = (*futurevalues)[i];
                        //Do more ambitious checks now that mo is more complete
                        if (mo_may_allow(pfv.writer, pfv.act)&&
                                        pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
                                        (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
                                priv->next_backtrack = pfv.act;
                }
                futurevalues->resize(0);
        }

        mo_graph->commitChanges();
        mo_check_promises(curr->get_tid(), curr);

        get_thread(curr)->set_return_value(VALUE_NONE);
        return updated_mod_order || updated_promises;
}

/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc. This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelChecker::process_thread_action(ModelAction *curr)
{
        bool updated = false;

        switch (curr->get_type()) {
        case THREAD_CREATE: {
                Thread *th = (Thread *)curr->get_location();
                th->set_creation(curr);
                break;
        }
        case THREAD_JOIN: {
                Thread *blocking = (Thread *)curr->get_location();
                ModelAction *act = get_last_action(blocking->get_id());
                curr->synchronize_with(act);
                updated = true; /* trigger rel-seq checks */
                break;
        }
        case THREAD_FINISH: {
                Thread *th = get_thread(curr);
                while (!th->wait_list_empty()) {
                        ModelAction *act = th->pop_wait_list();
                        scheduler->wake(get_thread(act));
                }
                th->complete();
                updated = true; /* trigger rel-seq checks */
                break;
        }
        case THREAD_START: {
                check_promises(curr->get_tid(), NULL, curr->get_cv());
                break;
        }
        default:
                break;
        }

        return updated;
}

/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
        const ModelAction *write = curr->get_node()->get_relseq_break();
        struct release_seq *sequence = pending_rel_seqs->back();
        pending_rel_seqs->pop_back();
        ASSERT(sequence);
        ModelAction *acquire = sequence->acquire;
        const ModelAction *rf = sequence->rf;
        const ModelAction *release = sequence->release;
        ASSERT(acquire);
        ASSERT(release);
        ASSERT(rf);
        ASSERT(release->same_thread(rf));

        if (write == NULL) {
                /**
                 * @todo Forcing a synchronization requires that we set
                 * modification order constraints. For instance, we can't allow
                 * a fixup sequence in which two separate read-acquire
                 * operations read from the same sequence, where the first one
                 * synchronizes and the other doesn't. Essentially, we can't
                 * allow any writes to insert themselves between 'release' and
                 * 'rf'
                 */

                /* Must synchronize */
                if (!acquire->synchronize_with(release)) {
                        set_bad_synchronization();
                        return;
                }
                /* Re-check all pending release sequences */
                work_queue->push_back(CheckRelSeqWorkEntry(NULL));
                /* Re-check act for mo_graph edges */
                work_queue->push_back(MOEdgeWorkEntry(acquire));

                /* propagate synchronization to later actions */
                action_list_t::reverse_iterator rit = action_trace->rbegin();
                for (; (*rit) != acquire; rit++) {
                        ModelAction *propagate = *rit;
                        if (acquire->happens_before(propagate)) {
                                propagate->synchronize_with(acquire);
                                /* Re-check 'propagate' for mo_graph edges */
                                work_queue->push_back(MOEdgeWorkEntry(propagate));
                        }
                }
        } else {
                /* Break release sequence with new edges:
                 *   release --mo--> write --mo--> rf */
                mo_graph->addEdge(release, write);
                mo_graph->addEdge(write, rf);
        }

        /* See if we have realized a data race */
        checkDataRaces();
}

/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" in its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelChecker::initialize_curr_action(ModelAction **curr)
{
        ModelAction *newcurr;

        if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
                newcurr = process_rmw(*curr);
                delete *curr;

                if (newcurr->is_rmw())
                        compute_promises(newcurr);

                *curr = newcurr;
                return false;
        }

        (*curr)->set_seq_number(get_next_seq_num());

        newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
        if (newcurr) {
                /* First restore type and order in case of RMW operation */
                if ((*curr)->is_rmwr())
                        newcurr->copy_typeandorder(*curr);

                ASSERT((*curr)->get_location() == newcurr->get_location());
                newcurr->copy_from_new(*curr);

                /* Discard duplicate ModelAction; use action from NodeStack */
                delete *curr;

                /* Always compute new clock vector */
                newcurr->create_cv(get_parent_action(newcurr->get_tid()));

                *curr = newcurr;
                return false; /* Action was explored previously */
        } else {
                newcurr = *curr;

                /* Always compute new clock vector */
                newcurr->create_cv(get_parent_action(newcurr->get_tid()));

                /* Assign most recent release fence */
                newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

                /*
                 * Perform one-time actions when pushing new ModelAction onto
                 * NodeStack
                 */
                if (newcurr->is_write())
                        compute_promises(newcurr);
                else if (newcurr->is_relseq_fixup())
                        compute_relseq_breakwrites(newcurr);
                else if (newcurr->is_wait())
                        newcurr->get_node()->set_misc_max(2);
                else if (newcurr->is_notify_one()) {
                        newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
                }
                return true; /* This was a new ModelAction */
        }
}

/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether a lock or join operation would be successful (i.e., is the
 * lock already locked, or is the joined thread already complete). If not, put
 * the action in a waiter list.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelChecker::check_action_enabled(ModelAction *curr) {
        if (curr->is_lock()) {
                std::mutex * lock = (std::mutex *)curr->get_location();
                struct std::mutex_state * state = lock->get_state();
                if (state->islocked) {
                        //Stick the action in the appropriate waiting queue
                        get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
                        return false;
                }
        } else if (curr->get_type() == THREAD_JOIN) {
                Thread *blocking = (Thread *)curr->get_location();
                if (!blocking->is_complete()) {
                        blocking->push_wait_list(curr);
                        return false;
                }
        }

        return true;
}

/**
 * Stores the ModelAction for the current thread action.  Call this
 * immediately before switching from user- to system-context to pass
 * data between them.
 * @param act The ModelAction created by the user-thread action
 */
void ModelChecker::set_current_action(ModelAction *act) {
        priv->current_action = act;
}

/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The next Thread that must be executed. May be NULL if ModelChecker
 * makes no choice (e.g., according to replay execution, combining RMW actions,
 * etc.)
 */
Thread * ModelChecker::check_current_action(ModelAction *curr)
{
        ASSERT(curr);
        bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();

        if (!check_action_enabled(curr)) {
                /* Make the execution look like we chose to run this action
                 * much later, when a lock/join can succeed */
                get_current_thread()->set_pending(curr);
                scheduler->sleep(get_current_thread());
                return get_next_thread(NULL);
        }

        bool newly_explored = initialize_curr_action(&curr);

        wake_up_sleeping_actions(curr);

        /* Add the action to lists before any other model-checking tasks */
        if (!second_part_of_rmw)
                add_action_to_lists(curr);

        /* Build may_read_from set for newly-created actions */
        if (newly_explored && curr->is_read())
                build_reads_from_past(curr);

        /* Initialize work_queue with the "current action" work */
        work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
        while (!work_queue.empty() && !has_asserted()) {
                WorkQueueEntry work = work_queue.front();
                work_queue.pop_front();

                switch (work.type) {
                case WORK_CHECK_CURR_ACTION: {
                        ModelAction *act = work.action;
                        bool update = false; /* update this location's release seq's */
                        bool update_all = false; /* update all release seq's */

                        if (process_thread_action(curr))
                                update_all = true;

                        if (act->is_read() && process_read(act, second_part_of_rmw))
                                update = true;

                        if (act->is_write() && process_write(act))
                                update = true;

                        if (act->is_mutex_op() && process_mutex(act))
                                update_all = true;

                        if (act->is_relseq_fixup())
                                process_relseq_fixup(curr, &work_queue);

                        if (update_all)
                                work_queue.push_back(CheckRelSeqWorkEntry(NULL));
                        else if (update)
                                work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
                        break;
                }
                case WORK_CHECK_RELEASE_SEQ:
                        resolve_release_sequences(work.location, &work_queue);
                        break;
                case WORK_CHECK_MO_EDGES: {
                        /** @todo Complete verification of work_queue */
                        ModelAction *act = work.action;
                        bool updated = false;

                        if (act->is_read()) {
                                const ModelAction *rf = act->get_reads_from();
                                if (rf != NULL && r_modification_order(act, rf))
                                        updated = true;
                        }
                        if (act->is_write()) {
                                if (w_modification_order(act))
                                        updated = true;
                        }
                        mo_graph->commitChanges();

                        if (updated)
                                work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
                        break;
                }
                default:
                        ASSERT(false);
                        break;
                }
        }

        check_curr_backtracking(curr);
        set_backtracking(curr);
        return get_next_thread(curr);
}

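/**
 * @brief Check whether the current action is a candidate backtracking point
 *
 * If curr's Node (or its parent) still has unexplored alternatives
 * (backtracking threads, misc values, reads-from candidates, future values,
 * promises, or release-sequence breaks) and curr is later than the currently
 * cached backtracking point, record curr as the next backtracking point.
 */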
void ModelChecker::check_curr_backtracking(ModelAction * curr) {
        Node *currnode = curr->get_node();
        Node *parnode = currnode->get_parent();

        if ((!parnode->backtrack_empty() ||
                         !currnode->misc_empty() ||
                         !currnode->read_from_empty() ||
                         !currnode->future_value_empty() ||
                         !currnode->promise_empty() ||
                         !currnode->relseq_break_empty())
                        && (!priv->next_backtrack ||
                                        *curr > *priv->next_backtrack)) {
                priv->next_backtrack = curr;
        }
}

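/** @return True if any outstanding Promise has expired, i.e., its expiration
 *  clock is behind the sequence numbers already used in this execution */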
bool ModelChecker::promises_expired() const
{
        for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
                Promise *promise = (*promises)[promise_index];
                if (promise->get_expiration()<priv->used_sequence_numbers) {
                        return true;
                }
        }
        return false;
}

/**
 * This is the strongest feasibility check available.
 * @return whether the current trace (partial or complete) must be a prefix of
 * a feasible trace.
 */
bool ModelChecker::isfeasibleprefix() const
{
        return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
}

/**
 * Returns whether the current completed trace is feasible, except for pending
 * release sequences.
 */
bool ModelChecker::is_feasible_prefix_ignore_relseq() const
{
        if (DBG_ENABLED() && promises->size() != 0)
                DEBUG("Infeasible: unresolved promises\n");

        return !is_infeasible() && promises->size() == 0;
}

/**
 * Check if the current partial trace is infeasible. Does not check any
 * end-of-execution flags, which might rule out the execution. Thus, this is
 * useful only for ruling an execution as infeasible.
 * @return whether the current partial trace is infeasible.
 */
bool ModelChecker::is_infeasible() const
{
        if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
                DEBUG("Infeasible: RMW violation\n");

        return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
}

/**
 * Check if the current partial trace is infeasible, while ignoring
 * infeasibility related to two RMWs reading from the same store. It does not
 * check end-of-execution feasibility.
 * @see ModelChecker::is_infeasible
 * @return whether the current partial trace is infeasible, ignoring multiple
 * RMWs reading from the same store.
 */
bool ModelChecker::is_infeasible_ignoreRMW() const
{
        if (DBG_ENABLED()) {
                if (mo_graph->checkForCycles())
                        DEBUG("Infeasible: modification order cycles\n");
                if (priv->failed_promise)
                        DEBUG("Infeasible: failed promise\n");
                if (priv->too_many_reads)
                        DEBUG("Infeasible: too many reads\n");
                if (priv->bad_synchronization)
                        DEBUG("Infeasible: bad synchronization ordering\n");
                if (promises_expired())
                        DEBUG("Infeasible: promises expired\n");
        }
        return mo_graph->checkForCycles() || priv->failed_promise ||
                priv->too_many_reads || priv->bad_synchronization ||
                promises_expired();
}

/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelChecker::process_rmw(ModelAction *act) {
        ModelAction *lastread = get_last_action(act->get_tid());
        lastread->process_rmw(act);
        if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
                mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
                mo_graph->commitChanges();
        }
        return lastread;
}

/**
 * Checks whether a thread has read from the same write too many times
 * without seeing the effects of a later write.
 *
 * Basic idea:
 * 1) there must be a different write that we could read from that would satisfy the modification order,
 * 2) we must have read from the same value in excess of maxreads times, and
 * 3) that other write must have been in the reads_from set for maxreads times.
 *
 * If so, we decide that the execution is no longer feasible.
 */
void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
        if (params.maxreads != 0) {

                if (curr->get_node()->get_read_from_size() <= 1)
                        return;
                //Must make sure that execution is currently feasible...  We could
                //accidentally clear by rolling back
                if (is_infeasible())
                        return;
                std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
                int tid = id_to_int(curr->get_tid());

                /* Skip checks */
                if ((int)thrd_lists->size() <= tid)
                        return;
                action_list_t *list = &(*thrd_lists)[tid];

                action_list_t::reverse_iterator rit = list->rbegin();
                /* Skip past curr */
                for (; (*rit) != curr; rit++)
                        ;
                /* go past curr now */
                rit++;

                action_list_t::reverse_iterator ritcopy = rit;
                //See if we have enough reads from the same value
                int count = 0;
                for (; count < params.maxreads; rit++,count++) {
                        if (rit==list->rend())
                                return;
                        ModelAction *act = *rit;
                        if (!act->is_read())
                                return;

                        if (act->get_reads_from() != rf)
                                return;
                        if (act->get_node()->get_read_from_size() <= 1)
                                return;
                }
                for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
                        //Get write
                        const ModelAction * write = curr->get_node()->get_read_from_at(i);

                        //Need a different write
                        if (write==rf)
                                continue;

                        /* Test to see whether this is a feasible write to read from*/
                        mo_graph->startChanges();
                        r_modification_order(curr, write);
                        bool feasiblereadfrom = !is_infeasible();
                        mo_graph->rollbackChanges();

                        if (!feasiblereadfrom)
                                continue;
                        rit = ritcopy;

                        bool feasiblewrite = true;
1364                 //now we need to see if this write works for everyone
1365
1366                         for (int loop = count; loop>0; loop--,rit++) {
1367                                 ModelAction *act=*rit;
1368                                 bool foundvalue = false;
1369                                 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1370                                         if (act->get_node()->get_read_from_at(j)==write) {
1371                                                 foundvalue = true;
1372                                                 break;
1373                                         }
1374                                 }
1375                                 if (!foundvalue) {
1376                                         feasiblewrite = false;
1377                                         break;
1378                                 }
1379                         }
1380                         if (feasiblewrite) {
1381                                 priv->too_many_reads = true;
1382                                 return;
1383                         }
1384                 }
1385         }
1386 }
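
/*
 * Illustrative sketch of the program shape the maxreads check targets
 * (user-level code assumed for exposition): a spinning reader could be fed
 * the same stale write forever unless executions that keep re-reading it
 * past the maxreads bound are pruned as infeasible.
 *
 *      #include <atomic>
 *      std::atomic<bool> flag(false);
 *
 *      void waiter()
 *      {
 *              // Without the recency check, exploration could keep choosing
 *              // the initial 'false' write for every iteration of this loop.
 *              while (!flag.load(std::memory_order_relaxed))
 *                      ;
 *      }
 *
 *      void signaller()
 *      {
 *              flag.store(true, std::memory_order_relaxed);
 *      }
 */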
1387
1388 /**
1389  * Updates the mo_graph with the constraints imposed from the current
1390  * read.
1391  *
1392  * Basic idea is the following: Go through each other thread and find
1393  * the latest action that happened before our read.  Two cases:
1394  *
1395  * (1) The action is a write => that write must either occur before
1396  * the write we read from or be the write we read from.
1397  *
1398  * (2) The action is a read => the write that that action read from
1399  * must occur before the write we read from or be the same write.
1400  *
1401  * @param curr The current action. Must be a read.
1402  * @param rf The action that curr reads from. Must be a write.
1403  * @return True if modification order edges were added; false otherwise
1404  */
1405 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1406 {
1407         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1408         unsigned int i;
1409         bool added = false;
1410         ASSERT(curr->is_read());
1411
1412         /* Last SC fence in the current thread */
1413         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1414
1415         /* Iterate over all threads */
1416         for (i = 0; i < thrd_lists->size(); i++) {
1417                 /* Last SC fence in thread i */
1418                 ModelAction *last_sc_fence_thread_local = NULL;
1419                 if (int_to_id((int)i) != curr->get_tid())
1420                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1421
1422                 /* Last SC fence in thread i, before last SC fence in current thread */
1423                 ModelAction *last_sc_fence_thread_before = NULL;
1424                 if (last_sc_fence_local)
1425                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1426
1427                 /* Iterate over actions in thread, starting from most recent */
1428                 action_list_t *list = &(*thrd_lists)[i];
1429                 action_list_t::reverse_iterator rit;
1430                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1431                         ModelAction *act = *rit;
1432
1433                         if (act->is_write() && act != rf && act != curr) {
1434                                 /* C++, Section 29.3 statement 5 */
1435                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1436                                                 *act < *last_sc_fence_thread_local) {
1437                                         mo_graph->addEdge(act, rf);
1438                                         added = true;
1439                                 }
1440                                 /* C++, Section 29.3 statement 4 */
1441                                 else if (act->is_seqcst() && last_sc_fence_local &&
1442                                                 *act < *last_sc_fence_local) {
1443                                         mo_graph->addEdge(act, rf);
1444                                         added = true;
1445                                 }
1446                                 /* C++, Section 29.3 statement 6 */
1447                                 else if (last_sc_fence_thread_before &&
1448                                                 *act < *last_sc_fence_thread_before) {
1449                                         mo_graph->addEdge(act, rf);
1450                                         added = true;
1451                                 }
1452                         }
1453
1454                         /*
1455                          * Include at most one act per-thread that "happens
1456                          * before" curr.  Don't consider curr itself (the reflexive case).
1457                          */
1458                         if (act->happens_before(curr) && act != curr) {
1459                                 if (act->is_write()) {
1460                                         if (rf != act) {
1461                                                 mo_graph->addEdge(act, rf);
1462                                                 added = true;
1463                                         }
1464                                 } else {
1465                                         const ModelAction *prevreadfrom = act->get_reads_from();
1466                                         //if the previous read is unresolved, keep going...
1467                                         if (prevreadfrom == NULL)
1468                                                 continue;
1469
1470                                         if (rf != prevreadfrom) {
1471                                                 mo_graph->addEdge(prevreadfrom, rf);
1472                                                 added = true;
1473                                         }
1474                                 }
1475                                 break;
1476                         }
1477                 }
1478         }
1479
1480         return added;
1481 }
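
/*
 * Illustrative sketch of case (2) above (user-level code assumed for
 * exposition): an earlier read in the same thread observed W1 and happens
 * before curr, so once curr reads W2 the edge W1 --mo--> W2 is added.
 *
 *      #include <atomic>
 *      std::atomic<int> x(0);
 *
 *      void writer()
 *      {
 *              x.store(1, std::memory_order_release);          // W1
 *              x.store(2, std::memory_order_release);          // W2
 *      }
 *
 *      void reader()
 *      {
 *              int r1 = x.load(std::memory_order_acquire);     // reads W1
 *              int r2 = x.load(std::memory_order_acquire);     // curr: reads W2
 *              // r_modification_order() then orders W1 before W2, ruling out
 *              // executions where the second read goes back to an older write.
 *              (void)r1; (void)r2;
 *      }
 */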
1482
1483 /** This method fixes up the modification order when we resolve a
1484  *  promise.  The basic problem is that actions that occur after the
1485  *  read curr could not properly add items to the modification order
1486  *  for our read.
1487  *
1488  *  So for each thread, we find the earliest item that happens after
1489  *  the read curr.  This is the item we have to fix up with additional
1490  *  constraints.  If that action is a write, we add an MO edge between
1491  *  the Action rf and that action.  If the action is a read, we add an
1492  *  MO edge between the Action rf and whatever write the read accessed.
1493  *
1494  * @param curr is the read ModelAction that we are fixing up MO edges for.
1495  * @param rf is the write ModelAction that curr reads from.
1496  *
1497  */
1498 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1499 {
1500         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1501         unsigned int i;
1502         ASSERT(curr->is_read());
1503
1504         /* Iterate over all threads */
1505         for (i = 0; i < thrd_lists->size(); i++) {
1506                 /* Iterate over actions in thread, starting from most recent */
1507                 action_list_t *list = &(*thrd_lists)[i];
1508                 action_list_t::reverse_iterator rit;
1509                 ModelAction *lastact = NULL;
1510
1511                 /* Find the earliest action that happens after curr (curr itself counts only if it is an RMW) */
1512                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1513                         ModelAction *act = *rit;
1514                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1515                                 lastact = act;
1516                         } else
1517                                 break;
1518                 }
1519
1520                 /* Fix up at most one action per thread: the earliest one that happens after curr */
1521                 if (lastact != NULL) {
1522                         if (lastact==curr) {
1523                                 //Case 1: The resolved read is an RMW, and we need to make sure
1524                                 //that the write portion of the RMW is mod ordered after rf
1525
1526                                 mo_graph->addEdge(rf, lastact);
1527                         } else if (lastact->is_read()) {
1528                                 //Case 2: The resolved read is a normal read and the next
1529                                 //operation is a read, and we need to make sure the value read
1530                                 //is mod ordered after rf
1531
1532                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1533                                 if (postreadfrom != NULL&&rf != postreadfrom)
1534                                         mo_graph->addEdge(rf, postreadfrom);
1535                         } else {
1536                                 //Case 3: The resolved read is a normal read and the next
1537                                 //operation is a write, and we need to make sure that the
1538                                 //write is mod ordered after rf
1539                                 if (lastact!=rf)
1540                                         mo_graph->addEdge(rf, lastact);
1541                         }
1542                         break;
1543                 }
1544         }
1545 }
1546
1547 /**
1548  * Updates the mo_graph with the constraints imposed from the current write.
1549  *
1550  * Basic idea is the following: Go through each other thread and find
1551  * the latest action that happened before our write.  Two cases:
1552  *
1553  * (1) The action is a write => that write must occur before
1554  * the current write
1555  *
1556  * (2) The action is a read => the write that that action read from
1557  * must occur before the current write.
1558  *
1559  * This method also handles two other issues:
1560  *
1561  * (I) Sequential Consistency: Making sure that if the current write is
1562  * seq_cst, it occurs after the previous seq_cst write to the same location.
1563  *
1564  * (II) Sending the write back to non-synchronizing reads.
1565  *
1566  * @param curr The current action. Must be a write.
1567  * @return True if modification order edges were added; false otherwise
1568  */
1569 bool ModelChecker::w_modification_order(ModelAction *curr)
1570 {
1571         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1572         unsigned int i;
1573         bool added = false;
1574         ASSERT(curr->is_write());
1575
1576         if (curr->is_seqcst()) {
1577                 /* A seq_cst write must be modification ordered after the last
1578                  * seq_cst write to the same location. */
1579                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1580                 if (last_seq_cst != NULL) {
1581                         mo_graph->addEdge(last_seq_cst, curr);
1582                         added = true;
1583                 }
1584         }
1585
1586         /* Last SC fence in the current thread */
1587         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1588
1589         /* Iterate over all threads */
1590         for (i = 0; i < thrd_lists->size(); i++) {
1591                 /* Last SC fence in thread i, before last SC fence in current thread */
1592                 ModelAction *last_sc_fence_thread_before = NULL;
1593                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1594                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1595
1596                 /* Iterate over actions in thread, starting from most recent */
1597                 action_list_t *list = &(*thrd_lists)[i];
1598                 action_list_t::reverse_iterator rit;
1599                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1600                         ModelAction *act = *rit;
1601                         if (act == curr) {
1602                                 /*
1603                                  * 1) If RMW and it actually read from something, then we
1604                                  * already have all relevant edges, so just skip to next
1605                                  * thread.
1606                                  *
1607                                  * 2) If RMW and it didn't read from anything, we should add
1608                                  * whatever edges we can get to speed up convergence.
1609                                  *
1610                                  * 3) If normal write, we need to look at earlier actions, so
1611                                  * continue processing list.
1612                                  */
1613                                 if (curr->is_rmw()) {
1614                                         if (curr->get_reads_from()!=NULL)
1615                                                 break;
1616                                         else
1617                                                 continue;
1618                                 } else
1619                                         continue;
1620                         }
1621
1622                         /* C++, Section 29.3 statement 7 */
1623                         if (last_sc_fence_thread_before && act->is_write() &&
1624                                         *act < *last_sc_fence_thread_before) {
1625                                 mo_graph->addEdge(act, curr);
1626                                 added = true;
1627                         }
1628
1629                         /*
1630                          * Include at most one act per-thread that "happens
1631                          * before" curr
1632                          */
1633                         if (act->happens_before(curr)) {
1634                                 /*
1635                                  * Note: if act is RMW, just add edge:
1636                                  *   act --mo--> curr
1637                                  * The following edge should be handled elsewhere:
1638                                  *   readfrom(act) --mo--> act
1639                                  */
1640                                 if (act->is_write())
1641                                         mo_graph->addEdge(act, curr);
1642                                 else if (act->is_read()) {
1643                                         //if the previous read is unresolved (reads_from is NULL), just keep going
1644                                         if (act->get_reads_from() == NULL)
1645                                                 continue;
1646                                         mo_graph->addEdge(act->get_reads_from(), curr);
1647                                 }
1648                                 added = true;
1649                                 break;
1650                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1651                                                      !act->same_thread(curr)) {
1652                                 /* We have an action that:
1653                                    (1) did not happen before us
1654                                    (2) is a read and we are a write
1655                                    (3) cannot synchronize with us
1656                                    (4) is in a different thread
1657                                    =>
1658                                    that read could potentially read from our write.  Note that
1659                                    these checks are overly conservative at this point, we'll
1660                                    do more checks before actually removing the
1661                                    pendingfuturevalue.
1662
1663                                  */
1664                                 if (thin_air_constraint_may_allow(curr, act)) {
1665                                         if (!is_infeasible() ||
1666                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1667                                                 struct PendingFutureValue pfv = {curr,act};
1668                                                 futurevalues->push_back(pfv);
1669                                         }
1670                                 }
1671                         }
1672                 }
1673         }
1674
1675         return added;
1676 }
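
/*
 * Illustrative sketch of issue (I) above (user-level code assumed for
 * exposition): two seq_cst stores to the same object must be modification
 * ordered consistently with the single total order S, which is what the
 * get_last_seq_cst_write() edge enforces.
 *
 *      #include <atomic>
 *      std::atomic<int> y(0);
 *
 *      void thread_a() { y.store(1); }         // memory_order_seq_cst by default
 *      void thread_b() { y.store(2); }         // memory_order_seq_cst by default
 *
 *      // Whichever store is later in S receives the edge
 *      // earlier --mo--> later from the curr->is_seqcst() branch above.
 */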
1677
1678 /** Arbitrary reads from the future are not allowed.  Section 29.3
1679  * part 9 places some constraints.  This method checks one result of that
1680  * constraint.  Others require compiler support. */
1681 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1682         if (!writer->is_rmw())
1683                 return true;
1684
1685         if (!reader->is_rmw())
1686                 return true;
1687
1688         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1689                 if (search == reader)
1690                         return false;
1691                 if (search->get_tid() == reader->get_tid() &&
1692                                 search->happens_before(reader))
1693                         break;
1694         }
1695
1696         return true;
1697 }
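
/*
 * Illustrative sketch of the shape this check rejects (user-level code
 * assumed for exposition): with two relaxed RMWs on the same object, a
 * pending future value must not let the "reader" RMW take its value from a
 * write that is itself fed, through the reads-from chain, by the reader.
 *
 *      #include <atomic>
 *      std::atomic<int> z(0);
 *
 *      void thread_a() { z.fetch_add(1, std::memory_order_relaxed); }  // writer
 *      void thread_b() { z.fetch_add(1, std::memory_order_relaxed); }  // reader
 *
 *      // If walking writer->get_reads_from() ever reaches the reader, letting
 *      // the reader also read from the writer would close a cycle, so the
 *      // future value is disallowed.
 */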
1698
1699 /**
1700  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1701  * some constraints. This method checks the following constraint (others
1702  * require compiler support):
1703  *
1704  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1705  */
1706 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1707 {
1708         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1709         unsigned int i;
1710         /* Iterate over all threads */
1711         for (i = 0; i < thrd_lists->size(); i++) {
1712                 const ModelAction *write_after_read = NULL;
1713
1714                 /* Iterate over actions in thread, starting from most recent */
1715                 action_list_t *list = &(*thrd_lists)[i];
1716                 action_list_t::reverse_iterator rit;
1717                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1718                         ModelAction *act = *rit;
1719
1720                         if (!reader->happens_before(act))
1721                                 break;
1722                         else if (act->is_write())
1723                                 write_after_read = act;
1724                         else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1725                                 write_after_read = act->get_reads_from();
1726                         }
1727                 }
1728
1729                 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1730                         return false;
1731         }
1732         return true;
1733 }
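
/*
 * Illustrative sketch of the constraint above (user-level code assumed for
 * exposition), with X the reader, Y a write sequenced after it, and Z the
 * candidate future write:
 *
 *      #include <atomic>
 *      std::atomic<int> v(0);
 *
 *      void thread_a()
 *      {
 *              int r = v.load(std::memory_order_relaxed);      // X
 *              v.store(1, std::memory_order_relaxed);          // Y: X --hb--> Y
 *              (void)r;
 *      }
 *
 *      void thread_b()
 *      {
 *              v.store(2, std::memory_order_relaxed);          // Z
 *      }
 *
 *      // If the graph already records Y --mo--> Z, mo_may_allow() refuses to
 *      // let X read the future value written by Z.
 */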
1734
1735 /**
1736  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1737  * The ModelAction under consideration is expected to be taking part in
1738  * release/acquire synchronization as an object of the "reads from" relation.
1739  * Note that this can only provide release sequence support for RMW chains
1740  * which do not read from the future, as those actions cannot be traced until
1741  * their "promise" is fulfilled. Similarly, we may not even establish the
1742  * presence of a release sequence with certainty, as some modification order
1743  * constraints may be decided further in the future. Thus, this function
1744  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1745  * and a boolean representing certainty.
1746  *
1747  * @param rf The action that might be part of a release sequence. Must be a
1748  * write.
1749  * @param release_heads A pass-by-reference style return parameter. After
1750  * execution of this function, release_heads will contain the heads of all the
1751  * relevant release sequences, if any exist with certainty
1752  * @param pending A pass-by-reference style return parameter which is only used
1753  * when returning false (i.e., uncertain). Returns most information regarding
1754  * an uncertain release sequence, including any write operations that might
1755  * break the sequence.
1756  * @return true, if the ModelChecker is certain that release_heads is complete;
1757  * false otherwise
1758  */
1759 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1760                 rel_heads_list_t *release_heads,
1761                 struct release_seq *pending) const
1762 {
1763         /* Only check for release sequences if there are no cycles */
1764         if (mo_graph->checkForCycles())
1765                 return false;
1766
1767         while (rf) {
1768                 ASSERT(rf->is_write());
1769
1770                 if (rf->is_release())
1771                         release_heads->push_back(rf);
1772                 if (!rf->is_rmw())
1773                         break; /* End of RMW chain */
1774
1775                 /** @todo Need to be smarter here...  In the linux lock
1776                  * example, this will run to the beginning of the program for
1777                  * every acquire. */
1778                 /** @todo The way to be smarter here is to keep going until 1
1779                  * thread has a release preceded by an acquire and you've seen
1780                  *       both. */
1781
1782                 /* acq_rel RMW is a sufficient stopping condition */
1783                 if (rf->is_acquire() && rf->is_release())
1784                         return true; /* complete */
1785
1786                 rf = rf->get_reads_from();
1787         }
1788         if (!rf) {
1789                 /* read from future: need to settle this later */
1790                 pending->rf = NULL;
1791                 return false; /* incomplete */
1792         }
1793
1794         if (rf->is_release())
1795                 return true; /* complete */
1796
1797         /* else relaxed write; check modification order for contiguous subsequence
1798          * -> rf must be same thread as release */
1799         int tid = id_to_int(rf->get_tid());
1800         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1801         action_list_t *list = &(*thrd_lists)[tid];
1802         action_list_t::const_reverse_iterator rit;
1803
1804         /* Find rf in the thread list */
1805         rit = std::find(list->rbegin(), list->rend(), rf);
1806         ASSERT(rit != list->rend());
1807
1808         /* Find the last write/release */
1809         for (; rit != list->rend(); rit++)
1810                 if ((*rit)->is_release())
1811                         break;
1812         if (rit == list->rend()) {
1813                 /* No write-release in this thread */
1814                 return true; /* complete */
1815         }
1816         ModelAction *release = *rit;
1817
1818         ASSERT(rf->same_thread(release));
1819
1820         pending->writes.clear();
1821
1822         bool certain = true;
1823         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1824                 if (id_to_int(rf->get_tid()) == (int)i)
1825                         continue;
1826                 list = &(*thrd_lists)[i];
1827
1828                 /* Can we ensure no future writes from this thread may break
1829                  * the release seq? */
1830                 bool future_ordered = false;
1831
1832                 ModelAction *last = get_last_action(int_to_id(i));
1833                 Thread *th = get_thread(int_to_id(i));
1834                 if ((last && rf->happens_before(last)) ||
1835                                 !is_enabled(th) ||
1836                                 th->is_complete())
1837                         future_ordered = true;
1838
1839                 ASSERT(!th->is_model_thread() || future_ordered);
1840
1841                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1842                         const ModelAction *act = *rit;
1843                         /* Reach synchronization -> this thread is complete */
1844                         if (act->happens_before(release))
1845                                 break;
1846                         if (rf->happens_before(act)) {
1847                                 future_ordered = true;
1848                                 continue;
1849                         }
1850
1851                         /* Only non-RMW writes can break release sequences */
1852                         if (!act->is_write() || act->is_rmw())
1853                                 continue;
1854
1855                         /* Check modification order */
1856                         if (mo_graph->checkReachable(rf, act)) {
1857                                 /* rf --mo--> act */
1858                                 future_ordered = true;
1859                                 continue;
1860                         }
1861                         if (mo_graph->checkReachable(act, release))
1862                                 /* act --mo--> release */
1863                                 break;
1864                         if (mo_graph->checkReachable(release, act) &&
1865                                       mo_graph->checkReachable(act, rf)) {
1866                                 /* release --mo-> act --mo--> rf */
1867                                 return true; /* complete */
1868                         }
1869                         /* act may break release sequence */
1870                         pending->writes.push_back(act);
1871                         certain = false;
1872                 }
1873                 if (!future_ordered)
1874                         certain = false; /* This thread is uncertain */
1875         }
1876
1877         if (certain) {
1878                 release_heads->push_back(release);
1879                 pending->writes.clear();
1880         } else {
1881                 pending->release = release;
1882                 pending->rf = rf;
1883         }
1884         return certain;
1885 }
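
/*
 * Illustrative sketch of a release sequence (user-level code assumed for
 * exposition): the release store is the head, the relaxed RMW by another
 * thread continues the sequence, and an acquire load that reads the tail
 * synchronizes with the head, which is what this function tries to
 * establish.  A plain relaxed store to 's' by a third thread could break
 * the sequence; such writes are the ones collected in pending->writes.
 *
 *      #include <atomic>
 *      std::atomic<int> s(0);
 *
 *      void producer() { s.store(1, std::memory_order_release); }      // head
 *      void helper()   { s.fetch_add(1, std::memory_order_relaxed); }  // continues sequence
 *
 *      void consumer()
 *      {
 *              if (s.load(std::memory_order_acquire) == 2) {
 *                      // Read the RMW at the tail, so this load synchronizes
 *                      // with the release store in producer().
 *              }
 *      }
 */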
1886
1887 /**
1888  * A public interface for getting the release sequence head(s) with which a
1889  * given ModelAction must synchronize. This function only returns a non-empty
1890  * result when it can locate a release sequence head with certainty. Otherwise,
1891  * it may mark the internal state of the ModelChecker so that it will handle
1892  * the release sequence at a later time, causing @a act to update its
1893  * synchronization at some later point in execution.
1894  * @param act The 'acquire' action that may read from a release sequence
1895  * @param release_heads A pass-by-reference return parameter. Will be filled
1896  * with the head(s) of the release sequence(s), if they exist with certainty.
1897  * @see ModelChecker::release_seq_heads
1898  */
1899 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1900 {
1901         const ModelAction *rf = act->get_reads_from();
1902         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1903         sequence->acquire = act;
1904
1905         if (!release_seq_heads(rf, release_heads, sequence)) {
1906                 /* add act to 'lazy checking' list */
1907                 pending_rel_seqs->push_back(sequence);
1908         } else {
1909                 snapshot_free(sequence);
1910         }
1911 }
1912
1913 /**
1914  * Attempt to resolve all stashed operations that might synchronize with a
1915  * release sequence for a given location. This implements the "lazy" portion of
1916  * determining whether or not a release sequence was contiguous, since not all
1917  * modification order information is present at the time an action occurs.
1918  *
1919  * @param location The location/object that should be checked for release
1920  * sequence resolutions. A NULL value means to check all locations.
1921  * @param work_queue The work queue to which to add work items as they are
1922  * generated
1923  * @return True if any updates occurred (new synchronization, new mo_graph
1924  * edges)
1925  */
1926 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1927 {
1928         bool updated = false;
1929         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1930         while (it != pending_rel_seqs->end()) {
1931                 struct release_seq *pending = *it;
1932                 ModelAction *act = pending->acquire;
1933
1934                 /* Only resolve sequences on the given location, if provided */
1935                 if (location && act->get_location() != location) {
1936                         it++;
1937                         continue;
1938                 }
1939
1940                 const ModelAction *rf = act->get_reads_from();
1941                 rel_heads_list_t release_heads;
1942                 bool complete;
1943                 complete = release_seq_heads(rf, &release_heads, pending);
1944                 for (unsigned int i = 0; i < release_heads.size(); i++) {
1945                         if (!act->has_synchronized_with(release_heads[i])) {
1946                                 if (act->synchronize_with(release_heads[i]))
1947                                         updated = true;
1948                                 else
1949                                         set_bad_synchronization();
1950                         }
1951                 }
1952
1953                 if (updated) {
1954                         /* Re-check all pending release sequences */
1955                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1956                         /* Re-check act for mo_graph edges */
1957                         work_queue->push_back(MOEdgeWorkEntry(act));
1958
1959                         /* propagate synchronization to later actions */
1960                         action_list_t::reverse_iterator rit = action_trace->rbegin();
1961                         for (; (*rit) != act; rit++) {
1962                                 ModelAction *propagate = *rit;
1963                                 if (act->happens_before(propagate)) {
1964                                         propagate->synchronize_with(act);
1965                                         /* Re-check 'propagate' for mo_graph edges */
1966                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
1967                                 }
1968                         }
1969                 }
1970                 if (complete) {
1971                         it = pending_rel_seqs->erase(it);
1972                         snapshot_free(pending);
1973                 } else {
1974                         it++;
1975                 }
1976         }
1977
1978         // If we resolved any release sequences, check whether we have realized a data race.
1979         checkDataRaces();
1980
1981         return updated;
1982 }
1983
1984 /**
1985  * Performs various bookkeeping operations for the current ModelAction. For
1986  * instance, adds action to the per-object, per-thread action vector and to the
1987  * action trace list of all thread actions.
1988  *
1989  * @param act is the ModelAction to add.
1990  */
1991 void ModelChecker::add_action_to_lists(ModelAction *act)
1992 {
1993         int tid = id_to_int(act->get_tid());
1994         action_trace->push_back(act);
1995
1996         get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
1997
1998         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1999         if (tid >= (int)vec->size())
2000                 vec->resize(priv->next_thread_id);
2001         (*vec)[tid].push_back(act);
2002
2003         if ((int)thrd_last_action->size() <= tid)
2004                 thrd_last_action->resize(get_num_threads());
2005         (*thrd_last_action)[tid] = act;
2006
2007         if (act->is_fence() && act->is_release()) {
2008                 if ((int)thrd_last_fence_release->size() <= tid)
2009                         thrd_last_fence_release->resize(get_num_threads());
2010                 (*thrd_last_fence_release)[tid] = act;
2011         }
2012
2013         if (act->is_wait()) {
2014                 void *mutex_loc=(void *) act->get_value();
2015                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2016
2017                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2018                 if (tid >= (int)vec->size())
2019                         vec->resize(priv->next_thread_id);
2020                 (*vec)[tid].push_back(act);
2021         }
2022 }
2023
2024 /**
2025  * @brief Get the last action performed by a particular Thread
2026  * @param tid The thread ID of the Thread in question
2027  * @return The last action in the thread, if one exists; NULL otherwise
2028  */
2029 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2030 {
2031         int threadid = id_to_int(tid);
2032         if (threadid < (int)thrd_last_action->size())
2033                 return (*thrd_last_action)[threadid];
2034         else
2035                 return NULL;
2036 }
2037
2038 /**
2039  * @brief Get the last fence release performed by a particular Thread
2040  * @param tid The thread ID of the Thread in question
2041  * @return The last fence release in the thread, if one exists; NULL otherwise
2042  */
2043 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2044 {
2045         int threadid = id_to_int(tid);
2046         if (threadid < (int)thrd_last_fence_release->size())
2047                 return (*thrd_last_fence_release)[threadid];
2048         else
2049                 return NULL;
2050 }
2051
2052 /**
2053  * Gets the last memory_order_seq_cst write (in the total global sequence)
2054  * performed on a particular object (i.e., memory location), not including the
2055  * current action.
2056  * @param curr The current ModelAction; also denotes the object location to
2057  * check
2058  * @return The last seq_cst write
2059  */
2060 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2061 {
2062         void *location = curr->get_location();
2063         action_list_t *list = get_safe_ptr_action(obj_map, location);
2064         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2065         action_list_t::reverse_iterator rit;
2066         for (rit = list->rbegin(); rit != list->rend(); rit++)
2067                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2068                         return *rit;
2069         return NULL;
2070 }
2071
2072 /**
2073  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2074  * performed in a particular thread, prior to a particular fence.
2075  * @param tid The ID of the thread to check
2076  * @param before_fence The fence from which to begin the search; if NULL, then
2077  * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
2078  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2079  */
2080 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2081 {
2082         /* All fences should have NULL location */
2083         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2084         action_list_t::reverse_iterator rit = list->rbegin();
2085
2086         if (before_fence) {
2087                 for (; rit != list->rend(); rit++)
2088                         if (*rit == before_fence)
2089                                 break;
2090
2091                 ASSERT(*rit == before_fence);
2092                 rit++;
2093         }
2094
2095         for (; rit != list->rend(); rit++)
2096                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2097                         return *rit;
2098         return NULL;
2099 }
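
/*
 * Illustrative sketch of the fences these lookups search for (user-level
 * code assumed for exposition): seq_cst fences are recorded under the NULL
 * location and are totally ordered by S, so statements 4-7 of Section 29.3
 * (used by the *_modification_order functions above) restrict which writes
 * the relaxed loads below may observe.
 *
 *      #include <atomic>
 *      std::atomic<int> a(0), b(0);
 *
 *      void thread_a()
 *      {
 *              a.store(1, std::memory_order_relaxed);
 *              std::atomic_thread_fence(std::memory_order_seq_cst);
 *              int rb = b.load(std::memory_order_relaxed);
 *              (void)rb;
 *      }
 *
 *      void thread_b()
 *      {
 *              b.store(1, std::memory_order_relaxed);
 *              std::atomic_thread_fence(std::memory_order_seq_cst);
 *              int ra = a.load(std::memory_order_relaxed);
 *              (void)ra;
 *      }
 */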
2100
2101 /**
2102  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2103  * location). This function identifies the mutex according to the current
2104  * action, which is presumed to operate on the same mutex.
2105  * @param curr The current ModelAction; also denotes the object location to
2106  * check
2107  * @return The last unlock operation
2108  */
2109 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2110 {
2111         void *location = curr->get_location();
2112         action_list_t *list = get_safe_ptr_action(obj_map, location);
2113         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2114         action_list_t::reverse_iterator rit;
2115         for (rit = list->rbegin(); rit != list->rend(); rit++)
2116                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2117                         return *rit;
2118         return NULL;
2119 }
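
/*
 * Illustrative sketch of the client code involved (user-level code assumed
 * for exposition): a lock acquisition pairs with the most recent unlock on
 * the same mutex object (a wait counts too, since it releases the mutex
 * internally), which is what the lookup above finds.
 *
 *      #include <mutex>
 *      std::mutex m;
 *      int shared;
 *
 *      void thread_a() { m.lock(); shared = 1; m.unlock(); }
 *      void thread_b() { m.lock(); int r = shared; m.unlock(); (void)r; }
 */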
2120
2121 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2122 {
2123         ModelAction *parent = get_last_action(tid);
2124         if (!parent)
2125                 parent = get_thread(tid)->get_creation();
2126         return parent;
2127 }
2128
2129 /**
2130  * Returns the clock vector for a given thread.
2131  * @param tid The thread whose clock vector we want
2132  * @return Desired clock vector
2133  */
2134 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2135 {
2136         return get_parent_action(tid)->get_cv();
2137 }
2138
2139 /**
2140  * Resolve a set of Promises with a current write. The set is provided in the
2141  * Node corresponding to @a write.
2142  * @param write The ModelAction that is fulfilling Promises
2143  * @return True if promises were resolved; false otherwise
2144  */
2145 bool ModelChecker::resolve_promises(ModelAction *write)
2146 {
2147         bool resolved = false;
2148         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2149
2150         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2151                 Promise *promise = (*promises)[promise_index];
2152                 if (write->get_node()->get_promise(i)) {
2153                         ModelAction *read = promise->get_action();
2154                         if (read->is_rmw()) {
2155                                 mo_graph->addRMWEdge(write, read);
2156                         }
2157                         read->read_from(write);
2158                         //First fix up the modification order for actions that happened
2159                         //before the read
2160                         r_modification_order(read, write);
2161                         //Next fix up the modification order for actions that happened
2162                         //after the read.
2163                         post_r_modification_order(read, write);
2164                         //Make sure the promise's value matches the write's value
2165                         ASSERT(promise->get_value() == write->get_value());
2166                         delete(promise);
2167
2168                         promises->erase(promises->begin() + promise_index);
2169                         threads_to_check.push_back(read->get_tid());
2170
2171                         resolved = true;
2172                 } else
2173                         promise_index++;
2174         }
2175
2176         //Check whether reading these writes has made threads unable to
2177         //resolve promises
2178
2179         for(unsigned int i=0;i<threads_to_check.size();i++)
2180                 mo_check_promises(threads_to_check[i], write);
2181
2182         return resolved;
2183 }
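
/*
 * Illustrative sketch of a promise (user-level code assumed for exposition):
 * a relaxed load may be allowed to return a value whose producing store has
 * not been executed yet in this exploration; the Promise records that debt,
 * and resolve_promises() above pays it off when the store is finally reached.
 *
 *      #include <atomic>
 *      std::atomic<int> p(0), q(0);
 *
 *      void thread_a()
 *      {
 *              int r = q.load(std::memory_order_relaxed);  // may promise the value 1
 *              p.store(r, std::memory_order_relaxed);
 *      }
 *
 *      void thread_b()
 *      {
 *              int r = p.load(std::memory_order_relaxed);
 *              q.store(1, std::memory_order_relaxed);      // later fulfills the promise
 *              (void)r;
 *      }
 */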
2184
2185 /**
2186  * Compute the set of promises that could potentially be satisfied by this
2187  * action. Note that the set computation actually appears in the Node, not in
2188  * ModelChecker.
2189  * @param curr The ModelAction that may satisfy promises
2190  */
2191 void ModelChecker::compute_promises(ModelAction *curr)
2192 {
2193         for (unsigned int i = 0; i < promises->size(); i++) {
2194                 Promise *promise = (*promises)[i];
2195                 const ModelAction *act = promise->get_action();
2196                 if (!act->happens_before(curr) &&
2197                                 act->is_read() &&
2198                                 !act->could_synchronize_with(curr) &&
2199                                 !act->same_thread(curr) &&
2200                                 act->get_location() == curr->get_location() &&
2201                                 promise->get_value() == curr->get_value()) {
2202                         curr->get_node()->set_promise(i, act->is_rmw());
2203                 }
2204         }
2205 }
2206
2207 /** Checks promises in response to change in ClockVector Threads. */
2208 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2209 {
2210         for (unsigned int i = 0; i < promises->size(); i++) {
2211                 Promise *promise = (*promises)[i];
2212                 const ModelAction *act = promise->get_action();
2213                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2214                                 merge_cv->synchronized_since(act)) {
2215                         if (promise->increment_threads(tid)) {
2216                                 //Promise has failed
2217                                 priv->failed_promise = true;
2218                                 return;
2219                         }
2220                 }
2221         }
2222 }
2223
2224 void ModelChecker::check_promises_thread_disabled() {
2225         for (unsigned int i = 0; i < promises->size(); i++) {
2226                 Promise *promise = (*promises)[i];
2227                 if (promise->check_promise()) {
2228                         priv->failed_promise = true;
2229                         return;
2230                 }
2231         }
2232 }
2233
2234 /** Checks promises in response to addition to modification order for threads.
2235  * Definitions:
2236  * pthread is the thread that performed the read that created the promise
2237  *
2238  * pread is the read that created the promise
2239  *
2240  * pwrite is either the first write to the same location as pread by
2241  * pthread that is sequenced after pread, or the write read by the
2242  * first read to the same location as pread by pthread that is
2243  * sequenced after pread.
2244  *
2245  * 1. If tid=pthread, then we check what other threads are reachable
2246  * through the modification order starting with pwrite.  Those threads
2247  * cannot perform a write that will resolve the promise due to
2248  * modification order constraints.
2249  *
2250  * 2. If tid is not pthread, we check whether pwrite can reach the
2251  * action write through the modification order.  If so, that thread
2252  * cannot perform a future write that will resolve the promise due to
2253  * modification order constraints.
2254  *
2255  * @param tid The thread that either read from the model action
2256  * write, or actually did the model action write.
2257  *
2258  * @param write The ModelAction representing the relevant write.
2259  */
2260
2261 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2262         void * location = write->get_location();
2263         for (unsigned int i = 0; i < promises->size(); i++) {
2264                 Promise *promise = (*promises)[i];
2265                 const ModelAction *act = promise->get_action();
2266
2267                 //Is this promise on the same location?
2268                 if ( act->get_location() != location )
2269                         continue;
2270
2271                 //same thread as the promise
2272                 if ( act->get_tid()==tid ) {
2273
2274                         //do we have a pwrite for the promise, if not, set it
2275                         if (promise->get_write() == NULL ) {
2276                                 promise->set_write(write);
2277                                 //The pwrite cannot happen before the promise
2278                                 if (write->happens_before(act) && (write != act)) {
2279                                         priv->failed_promise = true;
2280                                         return;
2281                                 }
2282                         }
2283                         if (mo_graph->checkPromise(write, promise)) {
2284                                 priv->failed_promise = true;
2285                                 return;
2286                         }
2287                 }
2288
2289                 //Don't do any lookups twice for the same thread
2290                 if (promise->has_sync_thread(tid))
2291                         continue;
2292
2293                 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2294                         if (promise->increment_threads(tid)) {
2295                                 priv->failed_promise = true;
2296                                 return;
2297                         }
2298                 }
2299         }
2300 }
2301
2302 /**
2303  * Compute the set of writes that may break the current pending release
2304  * sequence. This information is extracted from previous release sequence
2305  * calculations.
2306  *
2307  * @param curr The current ModelAction. Must be a release sequence fixup
2308  * action.
2309  */
2310 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2311 {
2312         if (pending_rel_seqs->empty())
2313                 return;
2314
2315         struct release_seq *pending = pending_rel_seqs->back();
2316         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2317                 const ModelAction *write = pending->writes[i];
2318                 curr->get_node()->add_relseq_break(write);
2319         }
2320
2321         /* NULL means don't break the sequence; just synchronize */
2322         curr->get_node()->add_relseq_break(NULL);
2323 }
2324
2325 /**
2326  * Build up an initial set of all past writes that this 'read' action may read
2327  * from. This set is determined by the clock vector's "happens before"
2328  * relationship.
2329  * @param curr is the current ModelAction that we are exploring; it must be a
2330  * 'read' operation.
2331  */
2332 void ModelChecker::build_reads_from_past(ModelAction *curr)
2333 {
2334         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2335         unsigned int i;
2336         ASSERT(curr->is_read());
2337
2338         ModelAction *last_sc_write = NULL;
2339
2340         /* Track whether this object has been initialized */
2341         bool initialized = false;
2342
2343         if (curr->is_seqcst()) {
2344                 last_sc_write = get_last_seq_cst_write(curr);
2345                 /* We have to at least see the last sequentially consistent write,
2346                          so we are initialized. */
2347                 if (last_sc_write != NULL)
2348                         initialized = true;
2349         }
2350
2351         /* Iterate over all threads */
2352         for (i = 0; i < thrd_lists->size(); i++) {
2353                 /* Iterate over actions in thread, starting from most recent */
2354                 action_list_t *list = &(*thrd_lists)[i];
2355                 action_list_t::reverse_iterator rit;
2356                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2357                         ModelAction *act = *rit;
2358
2359                         /* Only consider 'write' actions */
2360                         if (!act->is_write() || act == curr)
2361                                 continue;
2362
2363                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2364                         bool allow_read = true;
2365
2366                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2367                                 allow_read = false;
2368                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2369                                 allow_read = false;
2370
2371                         if (allow_read) {
2372                                 DEBUG("Adding action to may_read_from:\n");
2373                                 if (DBG_ENABLED()) {
2374                                         act->print();
2375                                         curr->print();
2376                                 }
2377                                 curr->get_node()->add_read_from(act);
2378                         }
2379
2380                         /* Include at most one act per-thread that "happens before" curr */
2381                         if (act->happens_before(curr)) {
2382                                 initialized = true;
2383                                 break;
2384                         }
2385                 }
2386         }
2387
2388         if (!initialized)
2389                 assert_bug("May read from uninitialized atomic");
2390
2391         if (DBG_ENABLED() || !initialized) {
2392                 model_print("Reached read action:\n");
2393                 curr->print();
2394                 model_print("Printing may_read_from\n");
2395                 curr->get_node()->print_may_read_from();
2396                 model_print("End printing may_read_from\n");
2397         }
2398 }
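
/*
 * Illustrative sketch of how the may_read_from set is bounded (user-level
 * code assumed for exposition): the load below may read either the
 * initializing store it happens after or the concurrent relaxed store, but
 * never a write older than the most recent write that happens before it.
 *
 *      #include <atomic>
 *      std::atomic<int> w(0);          // initialization happens before both threads
 *
 *      void thread_a() { w.store(1, std::memory_order_relaxed); }
 *
 *      void thread_b()
 *      {
 *              int r = w.load(std::memory_order_acquire);  // may_read_from: {0, 1}
 *              (void)r;
 *      }
 */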
2399
2400 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2401         while(true) {
2402                 Node *prevnode=write->get_node()->get_parent();
2403
2404                 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2405                 if (write->is_release()&&thread_sleep)
2406                         return true;
2407                 if (!write->is_rmw()) {
2408                         return false;
2409                 }
2410                 if (write->get_reads_from()==NULL)
2411                         return true;
2412                 write=write->get_reads_from();
2413         }
2414 }
2415
2416 static void print_list(action_list_t *list, int exec_num = -1)
2417 {
2418         action_list_t::iterator it;
2419
2420         model_print("---------------------------------------------------------------------\n");
2421         if (exec_num >= 0)
2422                 model_print("Execution %d:\n", exec_num);
2423
2424         unsigned int hash=0;
2425
2426         for (it = list->begin(); it != list->end(); it++) {
2427                 (*it)->print();
2428                 hash=hash^(hash<<3)^((*it)->hash());
2429         }
2430         model_print("HASH %u\n", hash);
2431         model_print("---------------------------------------------------------------------\n");
2432 }
2433
2434 #if SUPPORT_MOD_ORDER_DUMP
2435 void ModelChecker::dumpGraph(char *filename) {
2436         char buffer[200];
2437         sprintf(buffer, "%s.dot",filename);
2438         FILE *file=fopen(buffer, "w");
2439         fprintf(file, "digraph %s {\n",filename);
2440         mo_graph->dumpNodes(file);
2441         ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2442
2443         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2444                 ModelAction *action=*it;
2445                 if (action->is_read()) {
2446                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2447                         if (action->get_reads_from()!=NULL)
2448                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2449                 }
2450                 if (thread_array[action->get_tid()] != NULL) {
2451                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2452                 }
2453
2454                 thread_array[action->get_tid()]=action;
2455         }
2456         fprintf(file,"}\n");
2457         model_free(thread_array);
2458         fclose(file);
2459 }
2460 #endif
2461
2462 /** @brief Prints an execution trace summary. */
2463 void ModelChecker::print_summary() const
2464 {
2465 #if SUPPORT_MOD_ORDER_DUMP
2466         scheduler->print();
2467         char buffername[100];
2468         sprintf(buffername, "exec%04u", stats.num_total);
2469         mo_graph->dumpGraphToFile(buffername);
2470         sprintf(buffername, "graph%04u", stats.num_total);
2471         dumpGraph(buffername);
2472 #endif
2473
2474         if (!isfeasibleprefix())
2475                 model_print("INFEASIBLE EXECUTION!\n");
2476         print_list(action_trace, stats.num_total);
2477         model_print("\n");
2478 }
2479
2480 /**
2481  * Add a Thread to the system for the first time. Should only be called once
2482  * per thread.
2483  * @param t The Thread to add
2484  */
2485 void ModelChecker::add_thread(Thread *t)
2486 {
2487         thread_map->put(id_to_int(t->get_id()), t);
2488         scheduler->add_thread(t);
2489 }
2490
2491 /**
2492  * Removes a thread from the scheduler.
2493  * @param t The Thread to remove.
2494  */
2495 void ModelChecker::remove_thread(Thread *t)
2496 {
2497         scheduler->remove_thread(t);
2498 }
2499
2500 /**
2501  * @brief Get a Thread reference by its ID
2502  * @param tid The Thread's ID
2503  * @return A Thread reference
2504  */
2505 Thread * ModelChecker::get_thread(thread_id_t tid) const
2506 {
2507         return thread_map->get(id_to_int(tid));
2508 }
2509
2510 /**
2511  * @brief Get a reference to the Thread in which a ModelAction was executed
2512  * @param act The ModelAction
2513  * @return A Thread reference
2514  */
2515 Thread * ModelChecker::get_thread(ModelAction *act) const
2516 {
2517         return get_thread(act->get_tid());
2518 }
2519
2520 /**
2521  * @brief Check if a Thread is currently enabled
2522  * @param t The Thread to check
2523  * @return True if the Thread is currently enabled
2524  */
2525 bool ModelChecker::is_enabled(Thread *t) const
2526 {
2527         return scheduler->is_enabled(t);
2528 }
2529
2530 /**
2531  * @brief Check if a Thread is currently enabled
2532  * @param tid The ID of the Thread to check
2533  * @return True if the Thread is currently enabled
2534  */
2535 bool ModelChecker::is_enabled(thread_id_t tid) const
2536 {
2537         return scheduler->is_enabled(tid);
2538 }
2539
2540 /**
2541  * Switch from a user-context to the "master thread" context (a.k.a. system
2542  * context). This switch is made with the intention of exploring a particular
2543  * model-checking action (described by a ModelAction object). Must be called
2544  * from a user-thread context.
2545  *
2546  * @param act The current action that will be explored. May be NULL only if
2547  * trace is exiting via an assertion (see ModelChecker::set_assert and
2548  * ModelChecker::has_asserted).
2549  * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2550  */
2551 int ModelChecker::switch_to_master(ModelAction *act)
2552 {
2553         DBG();
2554         Thread *old = thread_current();
2555         set_current_action(act);
2556         old->set_state(THREAD_READY);
2557         return Thread::swap(old, &system_context);
2558 }
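/*
 * Hedged illustration (an assumption about the interception layer, not code
 * from this file): a user-thread operation typically wraps itself in a
 * ModelAction and yields to the master context. For a seq_cst atomic load of
 * a hypothetical location 'obj', that might look like:
 *
 *     ModelAction *act = new ModelAction(ATOMIC_READ, std::memory_order_seq_cst, obj);
 *     model->switch_to_master(act);  // returns once the scheduler resumes this thread
 *
 * The shorter ModelAction constructor form (value and thread defaulted) is
 * assumed here; the full form appears in take_step() below.
 */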
2559
2560 /**
2561  * Takes the next step in the execution, if possible.
2562  * @return True if a step was taken; false otherwise
2563  */
2564 bool ModelChecker::take_step() {
2565         if (has_asserted())
2566                 return false;
2567
2568         Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2569         if (curr) {
2570                 if (curr->get_state() == THREAD_READY) {
2571                         ASSERT(priv->current_action);
2572
2573                         priv->nextThread = check_current_action(priv->current_action);
2574                         priv->current_action = NULL;
2575
2576                         if (curr->is_blocked() || curr->is_complete())
2577                                 scheduler->remove_thread(curr);
2578                 } else {
2579                         ASSERT(false);
2580                 }
2581         }
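        /* Ask the scheduler to pick the next thread to run (may be NULL) */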
2582         Thread *next = scheduler->next_thread(priv->nextThread);
2583
2584         /* Infeasible -> don't take any more steps */
2585         if (is_infeasible())
2586                 return false;
2587         else if (isfeasibleprefix() && have_bug_reports()) {
2588                 set_assert();
2589                 return false;
2590         }
2591
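        /* Optional execution-length bound: stop stepping once the sequence-number budget is exhausted */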
2592         if (params.bound != 0) {
2593                 if (priv->used_sequence_numbers > params.bound) {
2594                         return false;
2595                 }
2596         }
2597
2598         DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2599                         next ? id_to_int(next->get_id()) : -1);
2600
2601         /*
2602          * Launch end-of-execution release sequence fixups only when there are:
2603          *
2604          * (1) no more user threads to run (or when execution replay chooses
2605          *     the 'model_thread')
2606          * (2) pending release sequences
2607          * (3) pending assertions (i.e., data races)
2608          * (4) no pending promises
2609          */
2610         if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2611                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2612                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2613                                 pending_rel_seqs->size());
2614                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2615                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2616                                 model_thread);
2617                 set_current_action(fixup);
2618                 return true;
2619         }
2620
2621         /* next == NULL -> don't take any more steps */
2622         if (!next)
2623                 return false;
2624
2625         next->set_state(THREAD_RUNNING);
2626
2627         if (next->get_pending() != NULL) {
2628                 /* restart a pending action */
2629                 set_current_action(next->get_pending());
2630                 next->set_pending(NULL);
2631                 next->set_state(THREAD_READY);
2632                 return true;
2633         }
2634
2635         /* Return false only if swap fails with an error */
2636         return (Thread::swap(&system_context, next) == 0);
2637 }
2638
2639 /** Wrapper to run the user's main function, with appropriate arguments */
2640 void user_main_wrapper(void *)
2641 {
2642         user_main(model->params.argc, model->params.argv);
2643 }
2644
2645 /** @brief Run ModelChecker for the user program */
2646 void ModelChecker::run()
2647 {
2648         do {
2649                 thrd_t user_thread;
2650
2651                 /* Start user program */
2652                 add_thread(new Thread(&user_thread, &user_main_wrapper, NULL));
2653
2654                 /* Wait for all threads to complete */
2655                 while (take_step());
2656         } while (next_execution());
2657
2658         print_stats();
2659 }
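/*
 * Hedged driver sketch (an assumption about the entry point elsewhere in this
 * tree, not part of model.cc): the checker is expected to construct the
 * global 'model' with its parameters and then call run() to explore
 * executions until next_execution() returns false.
 *
 *     struct model_params params;
 *     // ... fill in / parse params ...
 *     model = new ModelChecker(params);
 *     model->run();
 */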