[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "schedule.h"
10 #include "common.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
13 #include "datarace.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
16 #include "history.h"
17 #include "fuzzer.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 /**
22  * Structure for holding small ModelChecker members that should be snapshotted
23  */
24 struct model_snapshot_members {
25         model_snapshot_members() :
26                 /* First thread created will have id INITIAL_THREAD_ID */
27                 next_thread_id(INITIAL_THREAD_ID),
28                 used_sequence_numbers(0),
29                 bugs(),
30                 bad_synchronization(false),
31                 asserted(false)
32         { }
33
34         ~model_snapshot_members() {
35                 for (unsigned int i = 0;i < bugs.size();i++)
36                         delete bugs[i];
37                 bugs.clear();
38         }
39
40         unsigned int next_thread_id;
41         modelclock_t used_sequence_numbers;
42         SnapVector<bug_message *> bugs;
43         /** @brief Incorrectly-ordered synchronization was made */
44         bool bad_synchronization;
45         bool asserted;
46
47         SNAPSHOTALLOC
48 };
49
50
51 #ifdef TLS
52 #include <dlfcn.h>
53
54 //Code taken from LLVM and licensed under the University of Illinois Open Source
55 //License.
56 static uintptr_t thread_descriptor_size;
57 #if __LP64__ || defined(_WIN64)
58 #  define SANITIZER_WORDSIZE 64
59 #else
60 #  define SANITIZER_WORDSIZE 32
61 #endif
62
63 #if SANITIZER_WORDSIZE == 64
64 # define FIRST_32_SECOND_64(a, b) (b)
65 #else
66 # define FIRST_32_SECOND_64(a, b) (a)
67 #endif
68
69 #if defined(__x86_64__) && !defined(_LP64)
70 # define SANITIZER_X32 1
71 #else
72 # define SANITIZER_X32 0
73 #endif
74
75 #if defined(__arm__)
76 # define SANITIZER_ARM 1
77 #else
78 # define SANITIZER_ARM 0
79 #endif
80
81 uintptr_t ThreadDescriptorSize() {
82         uintptr_t val = thread_descriptor_size;
83         if (val)
84                 return val;
85 #if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
86 #ifdef _CS_GNU_LIBC_VERSION
87         char buf[64];
88         uintptr_t len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
89         if (len < sizeof(buf) && strncmp(buf, "glibc 2.", 8) == 0) {
90                 char *end;
91                 int minor = strtoll(buf + 8, &end, 10);
92                 if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) {
93                         int patch = 0;
94                         if (*end == '.')
95                                 // strtoll will return 0 if no valid conversion could be performed
96                                 patch = strtoll(end + 1, nullptr, 10);
97
98                         /* sizeof(struct pthread) values from various glibc versions.  */
99                         if (SANITIZER_X32)
100                                 val = 1728;// Assume only one particular version for x32.
101                         // For ARM sizeof(struct pthread) changed in Glibc 2.23.
102                         else if (SANITIZER_ARM)
103                                 val = minor <= 22 ? 1120 : 1216;
104                         else if (minor <= 3)
105                                 val = FIRST_32_SECOND_64(1104, 1696);
106                         else if (minor == 4)
107                                 val = FIRST_32_SECOND_64(1120, 1728);
108                         else if (minor == 5)
109                                 val = FIRST_32_SECOND_64(1136, 1728);
110                         else if (minor <= 9)
111                                 val = FIRST_32_SECOND_64(1136, 1712);
112                         else if (minor == 10)
113                                 val = FIRST_32_SECOND_64(1168, 1776);
114                         else if (minor == 11 || (minor == 12 && patch == 1))
115                                 val = FIRST_32_SECOND_64(1168, 2288);
116                         else if (minor <= 13)
117                                 val = FIRST_32_SECOND_64(1168, 2304);
118                         else
119                                 val = FIRST_32_SECOND_64(1216, 2304);
120                 }
121         }
122 #endif
123 #elif defined(__mips__)
124         // TODO(sagarthakur): add more values as per different glibc versions.
125         val = FIRST_32_SECOND_64(1152, 1776);
126 #elif defined(__aarch64__)
127         // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
128         val = 1776;
129 #elif defined(__powerpc64__)
130         val = 1776;     // from glibc.ppc64le 2.20-8.fc21
131 #elif defined(__s390__)
132         val = FIRST_32_SECOND_64(1152, 1776);   // valid for glibc 2.22
133 #endif
134         if (val)
135                 thread_descriptor_size = val;
136         return val;
137 }
138
139 #ifdef __i386__
140 # define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
141 #else
142 # define DL_INTERNAL_FUNCTION
143 #endif
144
145 intptr_t RoundUpTo(uintptr_t size, uintptr_t boundary) {
146         return (size + boundary - 1) & ~(boundary - 1);
147 }
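/* Illustrative note (not from the original source): with a power-of-two
 * boundary the mask trick rounds up to the next multiple, e.g.
 *   RoundUpTo(1000, 16) == (1000 + 15) & ~15 == 1008.
 * The boundary is assumed to be a power of two. */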
148
149 uintptr_t getTlsSize() {
150         // all currently supported platforms have 16-byte stack alignment
151         const size_t kStackAlign = 16;
152         typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
153         get_tls_func get_tls;
154         void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
155         memcpy(&get_tls, &get_tls_static_info_ptr, sizeof(get_tls_static_info_ptr));
156         ASSERT(get_tls != 0);
157         size_t tls_size = 0;
158         size_t tls_align = 0;
159         get_tls(&tls_size, &tls_align);
160         if (tls_align < kStackAlign)
161                 tls_align = kStackAlign;
162         return RoundUpTo(tls_size, tls_align);
163 }
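/* Usage sketch (illustrative, assuming the layout used by initTLS() below):
 *   uintptr_t size  = getTlsSize();
 *   uintptr_t start = get_tls_addr() - size + ThreadDescriptorSize();
 * gives the start of the TLS region that ModelExecution snapshots. */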
164
165 #endif
166
167 /** @brief Constructor */
168 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
169         model(m),
170         params(NULL),
171         scheduler(scheduler),
172         action_trace(),
173         thread_map(2),  /* We'll always need at least 2 threads */
174         pthread_map(0),
175         pthread_counter(1),
176         obj_map(),
177         condvar_waiters_map(),
178         obj_thrd_map(),
179         mutex_map(),
180         thrd_last_action(1),
181         thrd_last_fence_release(),
182         priv(new struct model_snapshot_members ()),
183         mo_graph(new CycleGraph()),
184         fuzzer(new Fuzzer()),
185         thrd_func_list(),
186         thrd_func_inst_lists()
187 #ifdef TLS
188         ,tls_base(NULL),
189         tls_addr(0),
190         tls_size(0),
191         thd_desc_size(0)
192 #endif
193 {
194         /* Initialize a model-checker thread, for special ModelActions */
195         model_thread = new Thread(get_next_id());
196         add_thread(model_thread);
197         scheduler->register_engine(this);
198 }
199
200 #ifdef TLS
201 void ModelExecution::initTLS() {
202         tls_addr = get_tls_addr();
203         tls_size = getTlsSize();
204         tls_addr -= tls_size;
205         thd_desc_size = ThreadDescriptorSize();
206         tls_addr += thd_desc_size;
207         tls_base = (char *) snapshot_calloc(tls_size,1);
208         memcpy(tls_base, reinterpret_cast<const char *>(tls_addr), tls_size);
209 }
210 #endif
211
212 /** @brief Destructor */
213 ModelExecution::~ModelExecution()
214 {
215         for (unsigned int i = 0;i < get_num_threads();i++)
216                 delete get_thread(int_to_id(i));
217
218         delete mo_graph;
219         delete priv;
220 }
221
222 int ModelExecution::get_execution_number() const
223 {
224         return model->get_execution_number();
225 }
226
227 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
228 {
229         action_list_t *tmp = hash->get(ptr);
230         if (tmp == NULL) {
231                 tmp = new action_list_t();
232                 hash->put(ptr, tmp);
233         }
234         return tmp;
235 }
236
237 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
238 {
239         SnapVector<action_list_t> *tmp = hash->get(ptr);
240         if (tmp == NULL) {
241                 tmp = new SnapVector<action_list_t>();
242                 hash->put(ptr, tmp);
243         }
244         return tmp;
245 }
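/* Usage sketch (illustrative): both helpers lazily allocate the per-location
 * entry on first use, so callers such as add_action_to_lists() can simply do
 *   action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
 *   list->push_back(act);
 * without checking whether the location has been seen before. */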
246
247 /** @return a thread ID for a new Thread */
248 thread_id_t ModelExecution::get_next_id()
249 {
250         return priv->next_thread_id++;
251 }
252
253 /** @return the number of user threads created during this execution */
254 unsigned int ModelExecution::get_num_threads() const
255 {
256         return priv->next_thread_id;
257 }
258
259 /** @return a sequence number for a new ModelAction */
260 modelclock_t ModelExecution::get_next_seq_num()
261 {
262         return ++priv->used_sequence_numbers;
263 }
264
265 /**
266  * @brief Should the current action wake up a given thread?
267  *
268  * @param curr The current action
269  * @param thread The thread that we might wake up
270  * @return True, if we should wake up the sleeping thread; false otherwise
271  */
272 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
273 {
274         const ModelAction *asleep = thread->get_pending();
275         /* Don't allow partial RMW to wake anyone up */
276         if (curr->is_rmwr())
277                 return false;
278         /* Synchronizing actions may have been backtracked */
279         if (asleep->could_synchronize_with(curr))
280                 return true;
281         /* All acquire/release fences and fence-acquire/store-release */
282         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
283                 return true;
284         /* Fence-release + store can awake load-acquire on the same location */
285         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
286                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
287                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
288                         return true;
289         }
290         return false;
291 }
292
293 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
294 {
295         for (unsigned int i = 0;i < get_num_threads();i++) {
296                 Thread *thr = get_thread(int_to_id(i));
297                 if (scheduler->is_sleep_set(thr)) {
298                         if (should_wake_up(curr, thr))
299                                 /* Remove this thread from sleep set */
300                                 scheduler->remove_sleep(thr);
301                 }
302         }
303 }
304
305 /** @brief Alert the model-checker that an incorrectly-ordered
306  * synchronization was made */
307 void ModelExecution::set_bad_synchronization()
308 {
309         priv->bad_synchronization = true;
310 }
311
312 bool ModelExecution::assert_bug(const char *msg)
313 {
314         priv->bugs.push_back(new bug_message(msg));
315
316         if (isfeasibleprefix()) {
317                 set_assert();
318                 return true;
319         }
320         return false;
321 }
322
323 /** @return True, if any bugs have been reported for this execution */
324 bool ModelExecution::have_bug_reports() const
325 {
326         return priv->bugs.size() != 0;
327 }
328
329 /** @return True, if any fatal bugs have been reported for this execution.
330  *  Any bug other than a data race is considered a fatal bug. Data races
331  *  are not considered fatal unless the number of races exceeds
332  *  a threshold (temporarily set as 15).
333  */
334 bool ModelExecution::have_fatal_bug_reports() const
335 {
336         return priv->bugs.size() != 0;
337 }
338
339 SnapVector<bug_message *> * ModelExecution::get_bugs() const
340 {
341         return &priv->bugs;
342 }
343
344 /**
345  * Check whether the current trace has triggered an assertion which should halt
346  * its execution.
347  *
348  * @return True, if the execution should be aborted; false otherwise
349  */
350 bool ModelExecution::has_asserted() const
351 {
352         return priv->asserted;
353 }
354
355 /**
356  * Trigger a trace assertion which should cause this execution to be halted.
357  * This can be due to a detected bug or due to an infeasibility that should
358  * halt ASAP.
359  */
360 void ModelExecution::set_assert()
361 {
362         priv->asserted = true;
363 }
364
365 /**
366  * Check if we are in a deadlock. Should only be called at the end of an
367  * execution, although it should not give false positives in the middle of an
368  * execution (there should be some ENABLED thread).
369  *
370  * @return True if program is in a deadlock; false otherwise
371  */
372 bool ModelExecution::is_deadlocked() const
373 {
374         bool blocking_threads = false;
375         for (unsigned int i = 0;i < get_num_threads();i++) {
376                 thread_id_t tid = int_to_id(i);
377                 if (is_enabled(tid))
378                         return false;
379                 Thread *t = get_thread(tid);
380                 if (!t->is_model_thread() && t->get_pending())
381                         blocking_threads = true;
382         }
383         return blocking_threads;
384 }
385
386 /**
387  * Check if this is a complete execution. That is, have all threads completed
388  * execution (rather than exiting because sleep sets have forced a redundant
389  * execution).
390  *
391  * @return True if the execution is complete.
392  */
393 bool ModelExecution::is_complete_execution() const
394 {
395         for (unsigned int i = 0;i < get_num_threads();i++)
396                 if (is_enabled(int_to_id(i)))
397                         return false;
398         return true;
399 }
400
401 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
402         uint64_t value = *((const uint64_t *) location);
403         modelclock_t storeclock;
404         thread_id_t storethread;
405         getStoreThreadAndClock(location, &storethread, &storeclock);
406         setAtomicStoreFlag(location);
407         ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
408         act->set_seq_number(storeclock);
409         add_normal_write_to_lists(act);
410         add_write_to_lists(act);
411         w_modification_order(act);
412         return act;
413 }
414
415
416 /**
417  * Processes a read model action.
418  * @param curr is the read model action to process.
419  * @param rf_set is the set of model actions we can possibly read from
420  * Chooses a write for curr to read from (via the fuzzer) and updates the mo_graph accordingly.
421  */
422 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
423 {
424         SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
425         bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
426         if (hasnonatomicstore) {
427                 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
428                 rf_set->push_back(nonatomicstore);
429         }
430
431         while(true) {
432                 int index = fuzzer->selectWrite(curr, rf_set);
433                 ModelAction *rf = (*rf_set)[index];
434
435
436                 ASSERT(rf);
437                 bool canprune = false;
438                 if (r_modification_order(curr, rf, priorset, &canprune)) {
439                         for(unsigned int i=0;i<priorset->size();i++) {
440                                 mo_graph->addEdge((*priorset)[i], rf);
441                         }
442                         read_from(curr, rf);
443                         get_thread(curr)->set_return_value(curr->get_return_value());
444                         delete priorset;
445                         if (canprune && curr->get_type() == ATOMIC_READ) {
446                                 int tid = id_to_int(curr->get_tid());
447                                 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
448                         }
449                         return;
450                 }
451                 priorset->clear();
452                 (*rf_set)[index] = rf_set->back();
453                 rf_set->pop_back();
454         }
455 }
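/* Note (illustrative summary of the loop above): the fuzzer picks a candidate
 * write from rf_set; if r_modification_order() rejects it because reading from
 * it would create a cycle in the mo_graph, the candidate is removed by swapping
 * it with the last element and the loop retries on the smaller set. */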
456
457 /**
458  * Processes a lock, trylock, unlock, wait, or notify model action.
459  * @param curr is the mutex operation to process.
460  *
461  * The trylock operation checks whether the lock is already held.  If not,
462  * it falls through to the normal lock case.  If so, it reports
463  * failure.
464  *
465  * The lock operation has already been checked that it is enabled, so
466  * it just grabs the lock and synchronizes with the previous unlock.
467  *
468  * The unlock operation has to re-enable all of the threads that are
469  * waiting on the lock.
470  *
471  * @return True if synchronization was updated; false otherwise
472  */
473 bool ModelExecution::process_mutex(ModelAction *curr)
474 {
475         cdsc::mutex *mutex = curr->get_mutex();
476         struct cdsc::mutex_state *state = NULL;
477
478         if (mutex)
479                 state = mutex->get_state();
480
481         switch (curr->get_type()) {
482         case ATOMIC_TRYLOCK: {
483                 bool success = !state->locked;
484                 curr->set_try_lock(success);
485                 if (!success) {
486                         get_thread(curr)->set_return_value(0);
487                         break;
488                 }
489                 get_thread(curr)->set_return_value(1);
490         }
491         //otherwise fall through to the lock case
492         case ATOMIC_LOCK: {
493                 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
494                 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
495                 //      assert_bug("Lock access before initialization");
496                 state->locked = get_thread(curr);
497                 ModelAction *unlock = get_last_unlock(curr);
498                 //synchronize with the previous unlock statement
499                 if (unlock != NULL) {
500                         synchronize(unlock, curr);
501                         return true;
502                 }
503                 break;
504         }
505         case ATOMIC_WAIT:
506         case ATOMIC_UNLOCK: {
507                 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T...  TIMED WAITS MUST EVENTUALLY RELEASE...
508
509                 /* wake up the other threads */
510                 for (unsigned int i = 0;i < get_num_threads();i++) {
511                         Thread *t = get_thread(int_to_id(i));
512                         Thread *curr_thrd = get_thread(curr);
513                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
514                                 scheduler->wake(t);
515                 }
516
517                 /* unlock the lock - after checking who was waiting on it */
518                 state->locked = NULL;
519
520                 if (!curr->is_wait())
521                         break;/* The rest is only for ATOMIC_WAIT */
522
523                 break;
524         }
525         case ATOMIC_NOTIFY_ALL: {
526                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
527                 //activate all the waiting threads
528                 for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
529                         scheduler->wake(get_thread(*rit));
530                 }
531                 waiters->clear();
532                 break;
533         }
534         case ATOMIC_NOTIFY_ONE: {
535                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
536                 if (waiters->size() != 0) {
537                         Thread * thread = fuzzer->selectNotify(waiters);
538                         scheduler->wake(thread);
539                 }
540                 break;
541         }
542
543         default:
544                 ASSERT(0);
545         }
546         return false;
547 }
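/* Note (illustrative): ATOMIC_TRYLOCK deliberately falls through into the
 * ATOMIC_LOCK case when the lock is available, so a successful trylock is
 * handled exactly like a lock acquisition, including synchronizing with the
 * previous unlock. */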
548
549 /**
550  * Process a write ModelAction
551  * @param curr The ModelAction to process
552  * Updates the modification order graph with the constraints imposed by this write.
553  */
554 void ModelExecution::process_write(ModelAction *curr)
555 {
556         w_modification_order(curr);
557         get_thread(curr)->set_return_value(VALUE_NONE);
558 }
559
560 /**
561  * Process a fence ModelAction
562  * @param curr The ModelAction to process
563  * @return True if synchronization was updated
564  */
565 bool ModelExecution::process_fence(ModelAction *curr)
566 {
567         /*
568          * fence-relaxed: no-op
569  *   fence-release: only log the occurrence (not in this function), for
570          *   use in later synchronization
571          * fence-acquire (this function): search for hypothetical release
572          *   sequences
573          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
574          */
575         bool updated = false;
576         if (curr->is_acquire()) {
577                 action_list_t *list = &action_trace;
578                 action_list_t::reverse_iterator rit;
579                 /* Find X : is_read(X) && X --sb-> curr */
580                 for (rit = list->rbegin();rit != list->rend();rit++) {
581                         ModelAction *act = *rit;
582                         if (act == curr)
583                                 continue;
584                         if (act->get_tid() != curr->get_tid())
585                                 continue;
586                         /* Stop at the beginning of the thread */
587                         if (act->is_thread_start())
588                                 break;
589                         /* Stop once we reach a prior fence-acquire */
590                         if (act->is_fence() && act->is_acquire())
591                                 break;
592                         if (!act->is_read())
593                                 continue;
594                         /* read-acquire will find its own release sequences */
595                         if (act->is_acquire())
596                                 continue;
597
598                         /* Establish hypothetical release sequences */
599                         ClockVector *cv = get_hb_from_write(act->get_reads_from());
600                         if (cv != NULL && curr->get_cv()->merge(cv))
601                                 updated = true;
602                 }
603         }
604         return updated;
605 }
606
607 /**
608  * @brief Process the current action for thread-related activity
609  *
610  * Performs current-action processing for a THREAD_* ModelAction. Processing
611  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
612  * synchronization, etc.  This function is a no-op for non-THREAD actions
613  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
614  *
615  * @param curr The current action
616  * @return True if synchronization was updated or a thread completed
617  */
618 bool ModelExecution::process_thread_action(ModelAction *curr)
619 {
620         bool updated = false;
621
622         switch (curr->get_type()) {
623         case THREAD_CREATE: {
624                 thrd_t *thrd = (thrd_t *)curr->get_location();
625                 struct thread_params *params = (struct thread_params *)curr->get_value();
626                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
627                 curr->set_thread_operand(th);
628                 add_thread(th);
629                 th->set_creation(curr);
630                 break;
631         }
632         case PTHREAD_CREATE: {
633                 (*(uint32_t *)curr->get_location()) = pthread_counter++;
634
635                 struct pthread_params *params = (struct pthread_params *)curr->get_value();
636                 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
637                 curr->set_thread_operand(th);
638                 add_thread(th);
639                 th->set_creation(curr);
640
641                 if ( pthread_map.size() < pthread_counter )
642                         pthread_map.resize( pthread_counter );
643                 pthread_map[ pthread_counter-1 ] = th;
644
645                 break;
646         }
647         case THREAD_JOIN: {
648                 Thread *blocking = curr->get_thread_operand();
649                 ModelAction *act = get_last_action(blocking->get_id());
650                 synchronize(act, curr);
651                 updated = true; /* trigger rel-seq checks */
652                 break;
653         }
654         case PTHREAD_JOIN: {
655                 Thread *blocking = curr->get_thread_operand();
656                 ModelAction *act = get_last_action(blocking->get_id());
657                 synchronize(act, curr);
658                 updated = true; /* trigger rel-seq checks */
659                 break;  // WL: to be added (modified)
660         }
661
662         case THREAD_FINISH: {
663                 Thread *th = get_thread(curr);
664                 /* Wake up any joining threads */
665                 for (unsigned int i = 0;i < get_num_threads();i++) {
666                         Thread *waiting = get_thread(int_to_id(i));
667                         if (waiting->waiting_on() == th &&
668                                         waiting->get_pending()->is_thread_join())
669                                 scheduler->wake(waiting);
670                 }
671                 th->complete();
672                 updated = true; /* trigger rel-seq checks */
673                 break;
674         }
675         case THREAD_START: {
676                 break;
677         }
678         default:
679                 break;
680         }
681
682         return updated;
683 }
684
685 /**
686  * Initialize the current action by performing one or more of the following
687  * actions, as appropriate: merging RMWR and RMWC/RMW actions,
688  * manipulating backtracking sets, allocating and
689  * initializing clock vectors, and computing the promises to fulfill.
690  *
691  * @param curr The current action, as passed from the user context; may be
692  * freed/invalidated after the execution of this function, with a different
693  * action "returned" in its place (pass-by-reference)
694  * @return True if curr is a newly-explored action; false otherwise
695  */
696 bool ModelExecution::initialize_curr_action(ModelAction **curr)
697 {
698         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
699                 ModelAction *newcurr = process_rmw(*curr);
700                 delete *curr;
701
702                 *curr = newcurr;
703                 return false;
704         } else {
705                 ModelAction *newcurr = *curr;
706
707                 newcurr->set_seq_number(get_next_seq_num());
708                 /* Always compute new clock vector */
709                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
710
711                 /* Assign most recent release fence */
712                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
713
714                 return true;    /* This was a new ModelAction */
715         }
716 }
717
718 /**
719  * @brief Establish reads-from relation between two actions
720  *
721  * Perform basic operations involved with establishing a concrete rf relation,
722  * including setting the ModelAction data and checking for release sequences.
723  *
724  * @param act The action that is reading (must be a read)
725  * @param rf The action from which we are reading (must be a write)
726  *
727  * If act is an acquire, its clock vector is merged with the happens-before clock vector derived from rf.
728  */
729
730 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
731 {
732         ASSERT(rf);
733         ASSERT(rf->is_write());
734
735         act->set_read_from(rf);
736         if (act->is_acquire()) {
737                 ClockVector *cv = get_hb_from_write(rf);
738                 if (cv == NULL)
739                         return;
740                 act->get_cv()->merge(cv);
741         }
742 }
743
744 /**
745  * @brief Synchronizes two actions
746  *
747  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
748  * This function performs the synchronization as well as providing other hooks
749  * for other checks along with synchronization.
750  *
751  * @param first The left-hand side of the synchronizes-with relation
752  * @param second The right-hand side of the synchronizes-with relation
753  * @return True if the synchronization was successful (i.e., was consistent
754  * with the execution order); false otherwise
755  */
756 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
757 {
758         if (*second < *first) {
759                 set_bad_synchronization();
760                 return false;
761         }
762         return second->synchronize_with(first);
763 }
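/* Usage sketch (illustrative): process_mutex() calls
 *   synchronize(unlock, curr);
 * so that a lock acquisition inherits the clock vector of the unlock it
 * synchronizes with; the call fails and flags bad synchronization when the
 * "second" action actually precedes the "first" in the execution order. */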
764
765 /**
766  * @brief Check whether a model action is enabled.
767  *
768  * Checks whether an operation would be successful (i.e., is a lock already
769  * locked, or is the joined thread already complete).
770  *
771  * For yield-blocking, yields are never enabled.
772  *
773  * @param curr is the ModelAction to check whether it is enabled.
774  * @return a bool that indicates whether the action is enabled.
775  */
776 bool ModelExecution::check_action_enabled(ModelAction *curr) {
777         if (curr->is_lock()) {
778                 cdsc::mutex *lock = curr->get_mutex();
779                 struct cdsc::mutex_state *state = lock->get_state();
780                 if (state->locked)
781                         return false;
782         } else if (curr->is_thread_join()) {
783                 Thread *blocking = curr->get_thread_operand();
784                 if (!blocking->is_complete()) {
785                         return false;
786                 }
787         }
788
789         return true;
790 }
791
792 /**
793  * This is the heart of the model checker routine. It performs model-checking
794  * actions corresponding to a given "current action." Among other processes, it
795  * calculates reads-from relationships, updates synchronization clock vectors,
796  * forms a memory_order constraints graph, and handles replay/backtrack
797  * execution when running permutations of previously-observed executions.
798  *
799  * @param curr The current action to process
800  * @return The ModelAction that is actually executed; may be different than
801  * curr
802  */
803 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
804 {
805         ASSERT(curr);
806         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
807         bool newly_explored = initialize_curr_action(&curr);
808
809         DBG();
810
811         wake_up_sleeping_actions(curr);
812
813         /* Add the action to lists before any other model-checking tasks */
814         if (!second_part_of_rmw && curr->get_type() != NOOP)
815                 add_action_to_lists(curr);
816
817         if (curr->is_write())
818                 add_write_to_lists(curr);
819
820         SnapVector<ModelAction *> * rf_set = NULL;
821         /* Build may_read_from set for newly-created actions */
822         if (newly_explored && curr->is_read())
823                 rf_set = build_may_read_from(curr);
824
825         process_thread_action(curr);
826
827         if (curr->is_read() && !second_part_of_rmw) {
828                 process_read(curr, rf_set);
829                 delete rf_set;
830         } else {
831                 ASSERT(rf_set == NULL);
832         }
833
834         if (curr->is_write())
835                 process_write(curr);
836
837         if (curr->is_fence())
838                 process_fence(curr);
839
840         if (curr->is_mutex_op())
841                 process_mutex(curr);
842
843         return curr;
844 }
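/* Illustrative summary of the dispatch above: each new action (1) may wake
 * sleeping threads, (2) is recorded in the per-object/per-thread lists, (3) if
 * it is a newly-explored read, gets its may-read-from set built, and (4) is
 * handed to process_thread_action / process_read / process_write /
 * process_fence / process_mutex as its type requires. */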
845
846 /**
847  * This is the strongest feasibility check available.
848  * @return whether the current trace (partial or complete) must be a prefix of
849  * a feasible trace.
850  */
851 bool ModelExecution::isfeasibleprefix() const
852 {
853         return !is_infeasible();
854 }
855
856 /**
857  * Print diagnostic information about an infeasible execution
858  * @param prefix A string to prefix the output with; if NULL, then a default
859  * message prefix will be provided
860  */
861 void ModelExecution::print_infeasibility(const char *prefix) const
862 {
863         char buf[100];
864         char *ptr = buf;
865         if (priv->bad_synchronization)
866                 ptr += sprintf(ptr, "[bad sw ordering]");
867         if (ptr != buf)
868                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
869 }
870
871 /**
872  * Check if the current partial trace is infeasible. Does not check any
873  * end-of-execution flags, which might rule out the execution. Thus, this is
874  * useful only for ruling an execution as infeasible.
875  * @return whether the current partial trace is infeasible.
876  */
877 bool ModelExecution::is_infeasible() const
878 {
879         return priv->bad_synchronization;
880 }
881
882 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
883 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
884         ModelAction *lastread = get_last_action(act->get_tid());
885         lastread->process_rmw(act);
886         if (act->is_rmw()) {
887                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
888         }
889         return lastread;
890 }
891
892 /**
893  * @brief Updates the mo_graph with the constraints imposed from the current
894  * read.
895  *
896  * Basic idea is the following: Go through each other thread and find
897  * the last action that happened before our read.  Two cases:
898  *
899  * -# The action is a write: that write must either occur before
900  * the write we read from or be the write we read from.
901  * -# The action is a read: the write that action read from
902  * must occur before the write we read from or be the same write.
903  *
904  * @param curr The current action. Must be a read.
905  * @param rf The ModelAction that curr reads from. Must be a write.
906  * @return True if curr can read from rf without creating a cycle in the mo_graph; false otherwise
907  */
908
909 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
910 {
911         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
912         unsigned int i;
913         ASSERT(curr->is_read());
914
915         /* Last SC fence in the current thread */
916         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
917
918         int tid = curr->get_tid();
919         ModelAction *prev_same_thread = NULL;
920         /* Iterate over all threads */
921         for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
922                 /* Last SC fence in thread tid */
923                 ModelAction *last_sc_fence_thread_local = NULL;
924                 if (i != 0)
925                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
926
927                 /* Last SC fence in thread tid, before last SC fence in current thread */
928                 ModelAction *last_sc_fence_thread_before = NULL;
929                 if (last_sc_fence_local)
930                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
931
932                 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
933                 if (prev_same_thread != NULL &&
934                                 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
935                                 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
936                         continue;
937                 }
938
939                 /* Iterate over actions in thread, starting from most recent */
940                 action_list_t *list = &(*thrd_lists)[tid];
941                 action_list_t::reverse_iterator rit;
942                 for (rit = list->rbegin();rit != list->rend();rit++) {
943                         ModelAction *act = *rit;
944
945                         /* Skip curr */
946                         if (act == curr)
947                                 continue;
948                         /* Don't want to add reflexive edges on 'rf' */
949                         if (act->equals(rf)) {
950                                 if (act->happens_before(curr))
951                                         break;
952                                 else
953                                         continue;
954                         }
955
956                         if (act->is_write()) {
957                                 /* C++, Section 29.3 statement 5 */
958                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
959                                                 *act < *last_sc_fence_thread_local) {
960                                         if (mo_graph->checkReachable(rf, act))
961                                                 return false;
962                                         priorset->push_back(act);
963                                         break;
964                                 }
965                                 /* C++, Section 29.3 statement 4 */
966                                 else if (act->is_seqcst() && last_sc_fence_local &&
967                                                                  *act < *last_sc_fence_local) {
968                                         if (mo_graph->checkReachable(rf, act))
969                                                 return false;
970                                         priorset->push_back(act);
971                                         break;
972                                 }
973                                 /* C++, Section 29.3 statement 6 */
974                                 else if (last_sc_fence_thread_before &&
975                                                                  *act < *last_sc_fence_thread_before) {
976                                         if (mo_graph->checkReachable(rf, act))
977                                                 return false;
978                                         priorset->push_back(act);
979                                         break;
980                                 }
981                         }
982
983                         /*
984                          * Include at most one act per-thread that "happens
985                          * before" curr
986                          */
987                         if (act->happens_before(curr)) {
988                                 if (i==0) {
989                                         if (last_sc_fence_local == NULL ||
990                                                         (*last_sc_fence_local < *act)) {
991                                                 prev_same_thread = act;
992                                         }
993                                 }
994                                 if (act->is_write()) {
995                                         if (mo_graph->checkReachable(rf, act))
996                                                 return false;
997                                         priorset->push_back(act);
998                                 } else {
999                                         const ModelAction *prevrf = act->get_reads_from();
1000                                         if (!prevrf->equals(rf)) {
1001                                                 if (mo_graph->checkReachable(rf, prevrf))
1002                                                         return false;
1003                                                 priorset->push_back(prevrf);
1004                                         } else {
1005                                                 if (act->get_tid() == curr->get_tid()) {
1006                                                         //Can prune curr from obj list
1007                                                         *canprune = true;
1008                                                 }
1009                                         }
1010                                 }
1011                                 break;
1012                         }
1013                 }
1014         }
1015         return true;
1016 }
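/* Note (illustrative): a false return value above means that the modification
 * order edges required for curr to read from rf would create a cycle in the
 * mo_graph; process_read() then discards this candidate write and retries with
 * another element of rf_set. */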
1017
1018 /**
1019  * Updates the mo_graph with the constraints imposed from the current write.
1020  *
1021  * Basic idea is the following: Go through each other thread and find
1022  * the latest action that happened before our write.  Two cases:
1023  *
1024  * (1) The action is a write => that write must occur before
1025  * the current write
1026  *
1027  * (2) The action is a read => the write that action read from
1028  * must occur before the current write.
1029  *
1030  * This method also handles two other issues:
1031  *
1032  * (I) Sequential Consistency: Making sure that if the current write is
1033  * seq_cst, that it occurs after the previous seq_cst write.
1034  *
1035  * (II) Sending the write back to non-synchronizing reads.
1036  *
1037  * @param curr The current action. Must be a write.
1041  */
1042 void ModelExecution::w_modification_order(ModelAction *curr)
1043 {
1044         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1045         unsigned int i;
1046         ASSERT(curr->is_write());
1047
1048         if (curr->is_seqcst()) {
1049                 /* We have to at least see the last sequentially consistent write,
1050                          so we are initialized. */
1051                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1052                 if (last_seq_cst != NULL) {
1053                         mo_graph->addEdge(last_seq_cst, curr);
1054                 }
1055         }
1056
1057         /* Last SC fence in the current thread */
1058         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1059
1060         /* Iterate over all threads */
1061         for (i = 0;i < thrd_lists->size();i++) {
1062                 /* Last SC fence in thread i, before last SC fence in current thread */
1063                 ModelAction *last_sc_fence_thread_before = NULL;
1064                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1065                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1066
1067                 /* Iterate over actions in thread, starting from most recent */
1068                 action_list_t *list = &(*thrd_lists)[i];
1069                 action_list_t::reverse_iterator rit;
1070                 bool force_edge = false;
1071                 for (rit = list->rbegin();rit != list->rend();rit++) {
1072                         ModelAction *act = *rit;
1073                         if (act == curr) {
1074                                 /*
1075                                  * 1) If RMW and it actually read from something, then we
1076                                  * already have all relevant edges, so just skip to next
1077                                  * thread.
1078                                  *
1079                                  * 2) If RMW and it didn't read from anything, we should add
1080                                  * whatever edge we can get to speed up convergence.
1081                                  *
1082                                  * 3) If normal write, we need to look at earlier actions, so
1083                                  * continue processing list.
1084                                  */
1085                                 force_edge = true;
1086                                 if (curr->is_rmw()) {
1087                                         if (curr->get_reads_from() != NULL)
1088                                                 break;
1089                                         else
1090                                                 continue;
1091                                 } else
1092                                         continue;
1093                         }
1094
1095                         /* C++, Section 29.3 statement 7 */
1096                         if (last_sc_fence_thread_before && act->is_write() &&
1097                                         *act < *last_sc_fence_thread_before) {
1098                                 mo_graph->addEdge(act, curr, force_edge);
1099                                 break;
1100                         }
1101
1102                         /*
1103                          * Include at most one act per-thread that "happens
1104                          * before" curr
1105                          */
1106                         if (act->happens_before(curr)) {
1107                                 /*
1108                                  * Note: if act is RMW, just add edge:
1109                                  *   act --mo--> curr
1110                                  * The following edge should be handled elsewhere:
1111                                  *   readfrom(act) --mo--> act
1112                                  */
1113                                 if (act->is_write())
1114                                         mo_graph->addEdge(act, curr, force_edge);
1115                                 else if (act->is_read()) {
1116                                         //if previous read accessed a null, just keep going
1117                                         mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
1118                                 }
1119                                 break;
1120                         }
1121                 }
1122         }
1123 }
1124
1125 /**
1126  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1127  * some constraints. This method checks one of the following constraints (others
1128  * require compiler support):
1129  *
1130  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1131  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1132  */
1133 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1134 {
1135         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1136         unsigned int i;
1137         /* Iterate over all threads */
1138         for (i = 0;i < thrd_lists->size();i++) {
1139                 const ModelAction *write_after_read = NULL;
1140
1141                 /* Iterate over actions in thread, starting from most recent */
1142                 action_list_t *list = &(*thrd_lists)[i];
1143                 action_list_t::reverse_iterator rit;
1144                 for (rit = list->rbegin();rit != list->rend();rit++) {
1145                         ModelAction *act = *rit;
1146
1147                         /* Don't disallow due to act == reader */
1148                         if (!reader->happens_before(act) || reader == act)
1149                                 break;
1150                         else if (act->is_write())
1151                                 write_after_read = act;
1152                         else if (act->is_read() && act->get_reads_from() != NULL)
1153                                 write_after_read = act->get_reads_from();
1154                 }
1155
1156                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1157                         return false;
1158         }
1159         return true;
1160 }
1161
1162 /**
1163  * Computes the happens-before clock vector that propagates from this write.
1164  *
1165  * @param rf The action that might be part of a release sequence. Must be a
1166  * write.
1167  * @return ClockVector of happens before relation.
1168  */
1169
1170 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1171         SnapVector<ModelAction *> * processset = NULL;
1172         for ( ;rf != NULL;rf = rf->get_reads_from()) {
1173                 ASSERT(rf->is_write());
1174                 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1175                         break;
1176                 if (processset == NULL)
1177                         processset = new SnapVector<ModelAction *>();
1178                 processset->push_back(rf);
1179         }
1180
1181         int i = (processset == NULL) ? 0 : processset->size();
1182
1183         ClockVector * vec = NULL;
1184         while(true) {
1185                 if (rf->get_rfcv() != NULL) {
1186                         vec = rf->get_rfcv();
1187                 } else if (rf->is_acquire() && rf->is_release()) {
1188                         vec = rf->get_cv();
1189                 } else if (rf->is_release() && !rf->is_rmw()) {
1190                         vec = rf->get_cv();
1191                 } else if (rf->is_release()) {
1192                         //have rmw that is release and doesn't have a rfcv
1193                         (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1194                         rf->set_rfcv(vec);
1195                 } else {
1196                         //operation that isn't release
1197                         if (rf->get_last_fence_release()) {
1198                                 if (vec == NULL)
1199                                         vec = rf->get_last_fence_release()->get_cv();
1200                                 else
1201                                         (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1202                         }
1203                         rf->set_rfcv(vec);
1204                 }
1205                 i--;
1206                 if (i >= 0) {
1207                         rf = (*processset)[i];
1208                 } else
1209                         break;
1210         }
1211         if (processset != NULL)
1212                 delete processset;
1213         return vec;
1214 }
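/* Note (illustrative): the first loop walks the reads-from chain of rmw
 * operations back to a write whose release clock vector is already known (or
 * that is itself a release), and the second loop walks back out along that
 * chain, caching each computed vector via set_rfcv() so later queries on the
 * same chain terminate early. */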
1215
1216 /**
1217  * Performs various bookkeeping operations for the current ModelAction. For
1218  * instance, adds action to the per-object, per-thread action vector and to the
1219  * action trace list of all thread actions.
1220  *
1221  * @param act is the ModelAction to add.
1222  */
1223 void ModelExecution::add_action_to_lists(ModelAction *act)
1224 {
1225         int tid = id_to_int(act->get_tid());
1226         ModelAction *uninit = NULL;
1227         int uninit_id = -1;
1228         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1229         if (list->empty() && act->is_atomic_var()) {
1230                 uninit = get_uninitialized_action(act);
1231                 uninit_id = id_to_int(uninit->get_tid());
1232                 list->push_front(uninit);
1233                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1234                 if (uninit_id >= (int)vec->size())
1235                         vec->resize(uninit_id + 1);
1236                 (*vec)[uninit_id].push_front(uninit);
1237         }
1238         list->push_back(act);
1239
1240         // Update action trace, a total order of all actions
1241         action_trace.push_back(act);
1242         if (uninit)
1243                 action_trace.push_front(uninit);
1244
1245         // Update obj_thrd_map, a per location, per thread, order of actions
1246         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1247         if (tid >= (int)vec->size())
1248                 vec->resize(priv->next_thread_id);
1249         (*vec)[tid].push_back(act);
1250         if (uninit)
1251                 (*vec)[uninit_id].push_front(uninit);
1252
1253         // Update thrd_last_action, the last action taken by each thread
1254         if ((int)thrd_last_action.size() <= tid)
1255                 thrd_last_action.resize(get_num_threads());
1256         thrd_last_action[tid] = act;
1257         if (uninit)
1258                 thrd_last_action[uninit_id] = uninit;
1259
1260         // Update thrd_last_fence_release, the last release fence taken by each thread
1261         if (act->is_fence() && act->is_release()) {
1262                 if ((int)thrd_last_fence_release.size() <= tid)
1263                         thrd_last_fence_release.resize(get_num_threads());
1264                 thrd_last_fence_release[tid] = act;
1265         }
1266
1267         if (act->is_wait()) {
1268                 void *mutex_loc = (void *) act->get_value();
1269                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1270
1271                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1272                 if (tid >= (int)vec->size())
1273                         vec->resize(priv->next_thread_id);
1274                 (*vec)[tid].push_back(act);
1275         }
1276 }
1277
1278 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1279         action_list_t::reverse_iterator rit = list->rbegin();
1280         modelclock_t next_seq = act->get_seq_number();
1281         if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
1282                 list->push_back(act);
1283         else {
1284                 for(;rit != list->rend();rit++) {
1285                         if ((*rit)->get_seq_number() == next_seq) {
1286                                 action_list_t::iterator it = rit.base();
1287                                 list->insert(it, act);
1288                                 break;
1289                         }
1290                 }
1291         }
1292 }
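/* Illustrative example: if the tail of the list carries sequence numbers
 * ... 7, 9, 10 and a lazily created write has sequence number 9, the reverse
 * scan above stops at the existing action numbered 9 and inserts the new
 * action immediately after it, keeping the list ordered by sequence number. */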
1293
1294 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1295         action_list_t::reverse_iterator rit = list->rbegin();
1296         modelclock_t next_seq = act->get_seq_number();
1297         if (rit == list->rend()) {
1298                 act->create_cv(NULL);
1299         } else if ((*rit)->get_seq_number() == next_seq) {
1300                 act->create_cv((*rit));
1301                 list->push_back(act);
1302         } else {
1303                 for(;rit != list->rend();rit++) {
1304                         if ((*rit)->get_seq_number() == next_seq) {
1305                                 act->create_cv((*rit));
1306                                 action_list_t::iterator it = rit.base();
1307                                 list->insert(it, act);
1308                                 break;
1309                         }
1310                 }
1311         }
1312 }
1313
1314 /**
1315  * Performs various bookkeeping operations for a normal write.  The
1316  * complication is that we are typically inserting a normal write
1317  * lazily, so we need to insert it into the middle of lists.
1318  *
1319  * @param act is the ModelAction to add.
1320  */
1321
1322 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1323 {
1324         int tid = id_to_int(act->get_tid());
1325         insertIntoActionListAndSetCV(&action_trace, act);
1326
1327         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1328         insertIntoActionList(list, act);
1329
1330         // Update obj_thrd_map, a per location, per thread, order of actions
1331         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1332         if (tid >= (int)vec->size())
1333                 vec->resize(priv->next_thread_id);
1334         insertIntoActionList(&(*vec)[tid],act);
1335
1336         // Update thrd_last_action, the last action taken by each thread
1337         if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1338                 thrd_last_action[tid] = act;
1339 }
1340
1341
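/**
 * @brief Add a write to the bookkeeping structures for its location
 *
 * Updates the last seq_cst write for the location (when the write is
 * seq_cst) and appends the write to the per-location, per-thread list of
 * writes (obj_wr_thrd_map).
 * @param write The write ModelAction to add
 */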
1342 void ModelExecution::add_write_to_lists(ModelAction *write) {
1343         // Update seq_cst map
1344         if (write->is_seqcst())
1345                 obj_last_sc_map.put(write->get_location(), write);
1346
1347         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1348         int tid = id_to_int(write->get_tid());
1349         if (tid >= (int)vec->size())
1350                 vec->resize(priv->next_thread_id);
1351         (*vec)[tid].push_back(write);
1352 }
1353
1354 /**
1355  * @brief Get the last action performed by a particular Thread
1356  * @param tid The thread ID of the Thread in question
1357  * @return The last action in the thread
1358  */
1359 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1360 {
1361         int threadid = id_to_int(tid);
1362         if (threadid < (int)thrd_last_action.size())
1363                 return thrd_last_action[threadid];
1364         else
1365                 return NULL;
1366 }
1367
1368 /**
1369  * @brief Get the last fence release performed by a particular Thread
1370  * @param tid The thread ID of the Thread in question
1371  * @return The last fence release in the thread, if one exists; NULL otherwise
1372  */
1373 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1374 {
1375         int threadid = id_to_int(tid);
1376         if (threadid < (int)thrd_last_fence_release.size())
1377                 return thrd_last_fence_release[threadid];
1378         else
1379                 return NULL;
1380 }
1381
1382 /**
1383  * Gets the last memory_order_seq_cst write (in the total global sequence)
1384  * performed on a particular object (i.e., memory location), not including the
1385  * current action.
1386  * @param curr The current ModelAction; also denotes the object location to
1387  * check
1388  * @return The last seq_cst write
1389  */
1390 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1391 {
1392         void *location = curr->get_location();
1393         return obj_last_sc_map.get(location);
1394 }
1395
1396 /**
1397  * Gets the last memory_order_seq_cst fence (in the total global sequence)
1398  * performed in a particular thread, prior to a particular fence.
1399  * @param tid The ID of the thread to check
1400  * @param before_fence The fence from which to begin the search; if NULL, then
1401  * search for the most recent fence in the thread.
1402  * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
1403  */
1404 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1405 {
1406         /* All fences should have location FENCE_LOCATION */
1407         action_list_t *list = obj_map.get(FENCE_LOCATION);
1408
1409         if (!list)
1410                 return NULL;
1411
1412         action_list_t::reverse_iterator rit = list->rbegin();
1413
1414         if (before_fence) {
1415                 for (;rit != list->rend();rit++)
1416                         if (*rit == before_fence)
1417                                 break;
1418
1419                 ASSERT(*rit == before_fence);
1420                 rit++;
1421         }
1422
1423         for (;rit != list->rend();rit++)
1424                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1425                         return *rit;
1426         return NULL;
1427 }
1428
1429 /**
1430  * Gets the last unlock operation performed on a particular mutex (i.e., memory
1431  * location). This function identifies the mutex according to the current
1432  * action, which is presumed to operate on the same mutex.
1433  * @param curr The current ModelAction; also denotes the object location to
1434  * check
1435  * @return The last operation that released the mutex (an unlock or a wait)
1436  */
1437 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1438 {
1439         void *location = curr->get_location();
1440
1441         action_list_t *list = obj_map.get(location);
1442         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1443         action_list_t::reverse_iterator rit;
1444         for (rit = list->rbegin();rit != list->rend();rit++)
1445                 if ((*rit)->is_unlock() || (*rit)->is_wait())
1446                         return *rit;
1447         return NULL;
1448 }
1449
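/**
 * @brief Get the "parent" action for a thread
 * @param tid The thread ID in question
 * @return The thread's last action if it has executed one; otherwise the
 * action that created the thread
 */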
1450 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1451 {
1452         ModelAction *parent = get_last_action(tid);
1453         if (!parent)
1454                 parent = get_thread(tid)->get_creation();
1455         return parent;
1456 }
1457
1458 /**
1459  * Returns the clock vector for a given thread.
1460  * @param tid The thread whose clock vector we want
1461  * @return Desired clock vector
1462  */
1463 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1464 {
1465         ModelAction *firstaction=get_parent_action(tid);
1466         return firstaction != NULL ? firstaction->get_cv() : NULL;
1467 }
1468
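/**
 * @brief Compare two values, considering only the bytes covered by the access
 * @param val1 The first value
 * @param val2 The second value
 * @param size The width of the access in bytes (1, 2, 4, or 8)
 * @return True if the values are equal when truncated to the access size;
 * e.g., valequals(0x1ff, 0xff, 1) is true because only the low byte is compared
 */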
1469 bool valequals(uint64_t val1, uint64_t val2, int size) {
1470         switch(size) {
1471         case 1:
1472                 return ((uint8_t)val1) == ((uint8_t)val2);
1473         case 2:
1474                 return ((uint16_t)val1) == ((uint16_t)val2);
1475         case 4:
1476                 return ((uint32_t)val1) == ((uint32_t)val2);
1477         case 8:
1478                 return val1==val2;
1479         default:
1480                 ASSERT(0);
1481                 return false;
1482         }
1483 }
1484
1485 /**
1486  * Build up an initial set of all past writes that this 'read' action may read
1487  * from.
1488  *
1489  * @param curr is the current ModelAction that we are exploring; it must be a
1490  * 'read' operation.
1491  */
1492 SnapVector<ModelAction *> *  ModelExecution::build_may_read_from(ModelAction *curr)
1493 {
1494         SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1495         unsigned int i;
1496         ASSERT(curr->is_read());
1497
1498         ModelAction *last_sc_write = NULL;
1499
1500         if (curr->is_seqcst())
1501                 last_sc_write = get_last_seq_cst_write(curr);
1502
1503         SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1504
1505         /* Iterate over all threads */
1506         for (i = 0;i < thrd_lists->size();i++) {
1507                 /* Iterate over actions in thread, starting from most recent */
1508                 action_list_t *list = &(*thrd_lists)[i];
1509                 action_list_t::reverse_iterator rit;
1510                 for (rit = list->rbegin();rit != list->rend();rit++) {
1511                         ModelAction *act = *rit;
1512
1513                         if (act == curr)
1514                                 continue;
1515
1516                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1517                         bool allow_read = true;
1518
1519                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1520                                 allow_read = false;
1521
1522                         /* Need to check whether we will have two RMW reading from the same value */
1523                         if (curr->is_rmwr()) {
1524                                 /* It is okay if we have a failing CAS */
1525                                 if (!curr->is_rmwrcas() ||
1526                                                 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1527                                         //Need to make sure we aren't the second RMW
1528                                         CycleNode * node = mo_graph->getNode_noCreate(act);
1529                                         if (node != NULL && node->getRMW() != NULL) {
1530                                                 //we are the second RMW
1531                                                 allow_read = false;
1532                                         }
1533                                 }
1534                         }
1535
1536                         if (allow_read) {
1537                                 /* Only add feasible reads */
1538                                 rf_set->push_back(act);
1539                         }
1540
1541                         /* Include at most one act per-thread that "happens before" curr */
1542                         if (act->happens_before(curr))
1543                                 break;
1544                 }
1545         }
1546
1547         if (DBG_ENABLED()) {
1548                 model_print("Reached read action:\n");
1549                 curr->print();
1550                 model_print("End printing read_from_past\n");
1551         }
1552         return rf_set;
1553 }
1554
1555 /**
1556  * @brief Get an action representing an uninitialized atomic
1557  *
1558  * This function may create a new one.
1559  *
1560  * @param curr The current action, which prompts the creation of an UNINIT action
1561  * @return A pointer to the UNINIT ModelAction
1562  */
1563 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1564 {
1565         ModelAction *act = curr->get_uninit_action();
1566         if (!act) {
1567                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1568                 curr->set_uninit_action(act);
1569         }
1570         act->create_cv(NULL);
1571         return act;
1572 }
1573
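/** @brief Print a formatted table of the actions in a list, along with a hash of the list */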
1574 static void print_list(const action_list_t *list)
1575 {
1576         action_list_t::const_iterator it;
1577
1578         model_print("------------------------------------------------------------------------------------\n");
1579         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1580         model_print("------------------------------------------------------------------------------------\n");
1581
1582         unsigned int hash = 0;
1583
1584         for (it = list->begin();it != list->end();it++) {
1585                 const ModelAction *act = *it;
1586                 if (act->get_seq_number() > 0)
1587                         act->print();
1588                 hash = hash^(hash<<3)^((*it)->hash());
1589         }
1590         model_print("HASH %u\n", hash);
1591         model_print("------------------------------------------------------------------------------------\n");
1592 }
1593
1594 #if SUPPORT_MOD_ORDER_DUMP
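/**
 * @brief Dump the execution as a Graphviz "dot" graph
 *
 * Writes the modification order graph plus reads-from (red) and
 * sequenced-before (blue) edges to <filename>.dot.
 * @param filename Base name of the output file
 */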
1595 void ModelExecution::dumpGraph(char *filename) const
1596 {
1597         char buffer[200];
1598         sprintf(buffer, "%s.dot", filename);
1599         FILE *file = fopen(buffer, "w");
1600         fprintf(file, "digraph %s {\n", filename);
1601         mo_graph->dumpNodes(file);
1602         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1603
1604         for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
1605                 ModelAction *act = *it;
1606                 if (act->is_read()) {
1607                         mo_graph->dot_print_node(file, act);
1608                         mo_graph->dot_print_edge(file,
1609                                  act->get_reads_from(),
1610                                  act,
1611                                  "label=\"rf\", color=red, weight=2");
1612                 }
1613                 if (thread_array[act->get_tid()]) {
1614                         mo_graph->dot_print_edge(file,
1615                                  thread_array[id_to_int(act->get_tid())],
1616                                  act,
1617                                  "label=\"sb\", color=blue, weight=400");
1618                 }
1619
1620                 thread_array[act->get_tid()] = act;
1621         }
1622         fprintf(file, "}\n");
1623         model_free(thread_array);
1624         fclose(file);
1625 }
1626 #endif
1627
1628 /** @brief Prints an execution trace summary. */
1629 void ModelExecution::print_summary() const
1630 {
1631 #if SUPPORT_MOD_ORDER_DUMP
1632         char buffername[100];
1633         sprintf(buffername, "exec%04u", get_execution_number());
1634         mo_graph->dumpGraphToFile(buffername);
1635         sprintf(buffername, "graph%04u", get_execution_number());
1636         dumpGraph(buffername);
1637 #endif
1638
1639         model_print("Execution trace %d:", get_execution_number());
1640         if (isfeasibleprefix()) {
1641                 if (scheduler->all_threads_sleeping())
1642                         model_print(" SLEEP-SET REDUNDANT");
1643                 if (have_bug_reports())
1644                         model_print(" DETECTED BUG(S)");
1645         } else
1646                 print_infeasibility(" INFEASIBLE");
1647         model_print("\n");
1648
1649         print_list(&action_trace);
1650         model_print("\n");
1651
1652 }
1653
1654 /**
1655  * Add a Thread to the system for the first time. Should only be called once
1656  * per thread.
1657  * @param t The Thread to add
1658  */
1659 void ModelExecution::add_thread(Thread *t)
1660 {
1661         unsigned int i = id_to_int(t->get_id());
1662         if (i >= thread_map.size())
1663                 thread_map.resize(i + 1);
1664         thread_map[i] = t;
1665         if (!t->is_model_thread())
1666                 scheduler->add_thread(t);
1667 }
1668
1669 /**
1670  * @brief Get a Thread reference by its ID
1671  * @param tid The Thread's ID
1672  * @return A Thread reference
1673  */
1674 Thread * ModelExecution::get_thread(thread_id_t tid) const
1675 {
1676         unsigned int i = id_to_int(tid);
1677         if (i < thread_map.size())
1678                 return thread_map[i];
1679         return NULL;
1680 }
1681
1682 /**
1683  * @brief Get a reference to the Thread in which a ModelAction was executed
1684  * @param act The ModelAction
1685  * @return A Thread reference
1686  */
1687 Thread * ModelExecution::get_thread(const ModelAction *act) const
1688 {
1689         return get_thread(act->get_tid());
1690 }
1691
1692 /**
1693  * @brief Get a Thread reference by its pthread ID
1694  * @param pid The pthread's ID
1695  * @return A Thread reference
1696  */
1697 Thread * ModelExecution::get_pthread(pthread_t pid) {
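        /* Reinterpret the pthread_t as a 32-bit index into pthread_map */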
1698         union {
1699                 pthread_t p;
1700                 uint32_t v;
1701         } x;
1702         x.p = pid;
1703         uint32_t thread_id = x.v;
1704         if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1705         else return NULL;
1706 }
1707
1708 /**
1709  * @brief Check if a Thread is currently enabled
1710  * @param t The Thread to check
1711  * @return True if the Thread is currently enabled
1712  */
1713 bool ModelExecution::is_enabled(Thread *t) const
1714 {
1715         return scheduler->is_enabled(t);
1716 }
1717
1718 /**
1719  * @brief Check if a Thread is currently enabled
1720  * @param tid The ID of the Thread to check
1721  * @return True if the Thread is currently enabled
1722  */
1723 bool ModelExecution::is_enabled(thread_id_t tid) const
1724 {
1725         return scheduler->is_enabled(tid);
1726 }
1727
1728 /**
1729  * @brief Select the next thread to execute based on the current action
1730  *
1731  * RMW actions occur in two parts, and we cannot split them. Similarly, THREAD_CREATE
1732  * actions should be followed by the execution of their child thread. In either
1733  * case, the current action should determine the next thread schedule.
1734  *
1735  * @param curr The current action
1736  * @return The next thread to run, if the current action will determine this
1737  * selection; otherwise NULL
1738  */
1739 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1740 {
1741         /* Do not split atomic RMW */
1742         if (curr->is_rmwr())
1743                 return get_thread(curr);
1744         /* Follow CREATE with the created thread */
1745         /* (not strictly needed, because model.cc already takes care of this) */
1746         if (curr->get_type() == THREAD_CREATE)
1747                 return curr->get_thread_operand();
1748         if (curr->get_type() == PTHREAD_CREATE) {
1749                 return curr->get_thread_operand();
1750         }
1751         return NULL;
1752 }
1753
1754 /**
1755  * Takes the next step in the execution, if possible.
1756  * @param curr The current step to take
1757  * @return Returns the next Thread to run, if any; NULL if this execution
1758  * should terminate
1759  */
1760 Thread * ModelExecution::take_step(ModelAction *curr)
1761 {
1762         Thread *curr_thrd = get_thread(curr);
1763         ASSERT(curr_thrd->get_state() == THREAD_READY);
1764
1765         ASSERT(check_action_enabled(curr));     /* May have side effects? */
1766         curr = check_current_action(curr);
1767         ASSERT(curr);
1768
1769         /* Process this action in ModelHistory for record keeping */
1770         model->get_history()->process_action( curr, curr_thrd->get_id() );
1771
1772         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1773                 scheduler->remove_thread(curr_thrd);
1774
1775         return action_select_next_thread(curr);
1776 }
1777
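/** @return The Fuzzer object associated with this execution */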
1778 Fuzzer * ModelExecution::getFuzzer() {
1779         return fuzzer;
1780 }