/* datarace.cc -- shadow-memory based data race detector (c11tester). */
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
19 static unsigned int store8_count = 0;
20 static unsigned int store16_count = 0;
21 static unsigned int store32_count = 0;
22 static unsigned int store64_count = 0;
23
24 static unsigned int load8_count = 0;
25 static unsigned int load16_count = 0;
26 static unsigned int load32_count = 0;
27 static unsigned int load64_count = 0;
28
/** @brief Convenience accessor for the model checker's current execution. */
static const ModelExecution * get_execution()
{
	return model->get_execution();
}
33
/** Initializes the data race detector: allocates the shadow-table root,
 *  reserves a contiguous zeroed arena for shadow base tables (handed out
 *  piecewise by table_calloc()), and creates the set used to de-duplicate
 *  reported races. */
void initRaceDetector()
{
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	raceset = new RaceSet();
}
42
/** Bump-pointer allocator over the preallocated shadow arena.  The arena was
 *  calloc'd at init, so returned memory is already zeroed.  Once the arena is
 *  exhausted, falls back to snapshot_calloc.  Allocations are never
 *  individually freed. */
void * table_calloc(size_t size)
{
	if ((((char *)memory_base) + size) > memory_top) {
		// Request does not fit in the remaining arena; allocate separately.
		return snapshot_calloc(size, 1);
	} else {
		void *tmp = memory_base;
		memory_base = ((char *)memory_base) + size;
		return tmp;
	}
}
53
/** This function looks up the entry in the shadow table corresponding to a
 *  given address.  The shadow memory is a radix tree keyed on 16-bit slices
 *  of the address (one extra level under BIT48 for 48-bit address spaces).
 *  Missing interior/leaf tables are created lazily via table_calloc(), so
 *  the returned pointer is always valid and initially points at 0. */
static uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	/* Bits 47..32 index the top-level table; allocate the second level on demand. */
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	/* Bits 31..16 select the base table; bits 15..0 select the word within it. */
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
72
73
/** Returns true if the last store recorded for this address was not an
 *  atomic store (used to detect non-atomic initialization of atomics). */
bool hasNonAtomicStore(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		// Compact record: the ATOMICMASK bit records whether the last write was atomic.
		return !(ATOMICMASK & shadowval);
	} else {
		if (shadowval == 0)
			// Never accessed: reported as "has a non-atomic store".
			// NOTE(review): confirm this default is intended for untouched locations.
			return true;
		struct RaceRecord *record = (struct RaceRecord *)shadowval;
		return !record->isAtomic;
	}
}
87
/** Marks the last store to this address as atomic, in whichever record form
 *  (compact shadow word or expanded RaceRecord) the address currently uses.
 *  An untouched location gets a fresh compact record with the atomic bit set. */
void setAtomicStoreFlag(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		*shadow = shadowval | ATOMICMASK;
	} else {
		if (shadowval == 0) {
			// No record yet: create an empty compact record flagged atomic.
			*shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
			return;
		}
		struct RaceRecord *record = (struct RaceRecord *)shadowval;
		record->isAtomic = 1;
	}
}
102
/** Retrieves the thread and clock of the last store to @a address into the
 *  out-parameters @a thread and @a clock, decoding whichever record form the
 *  address uses (an all-zero shadow word decodes to thread/clock 0). */
void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval) || shadowval == 0) {
		// Compact encoding: writer thread id and write clock are packed in the word.
		*thread = WRTHREADID(shadowval);
		*clock = WRITEVECTOR(shadowval);
	} else {
		struct RaceRecord *record = (struct RaceRecord *)shadowval;
		*thread = record->writeThread;
		*clock = record->writeClock;
	}
}
116
117 /**
118  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
119  * to check the potential for a data race.
120  * @param clock1 The current clock vector
121  * @param tid1 The current thread; paired with clock1
122  * @param clock2 The clock value for the potentially-racing action
123  * @param tid2 The thread ID for the potentially-racing action
124  * @return true if the current clock allows a race with the event at clock2/tid2
125  */
126 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
127                                                                                                          modelclock_t clock2, thread_id_t tid2)
128 {
129         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
130 }
131
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps. */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	// Decode the compact word before overwriting it.
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	if (readClock != 0) {
		// Carry the single recorded reader over into the reader arrays.
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	} else {
		// No reader yet; arrays are allocated lazily on first read.
		record->thread = NULL;
	}
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	// The shadow word now stores a pointer to the expanded record.
	*shadow = (uint64_t) record;
}
163
164 #define FIRST_STACK_FRAME 2
165
166 unsigned int race_hash(struct DataRace *race) {
167         unsigned int hash = 0;
168         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
169                 hash ^= ((uintptr_t)race->backtrace[i]);
170                 hash = (hash >> 3) | (hash << 29);
171         }
172         return hash;
173 }
174
175 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
176         if (r1->numframes != r2->numframes)
177                 return false;
178         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
179                 if (r1->backtrace[i] != r2->backtrace[i])
180                         return false;
181         }
182         return true;
183 }
184
/** This function is called when we detect a data race.  Builds a DataRace
 *  descriptor for the old access (thread/clock/kind) and the new access
 *  (action/kind) at @a address.  The backtrace and numframes fields are NOT
 *  filled in here; callers populate them before adding the race to the set.
 *  Returns NULL when race reporting is compiled out. */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
#ifdef REPORT_DATA_RACES
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
	return race;
#else
	return NULL;
#endif
}
201
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	/* NOTE(review): for Access 2 the value printed under "clock" is the
	 * action's sequence number, not a clock-vector entry -- confirm intended. */
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
226
/** This function does race detection for a write on an expanded record.
 *  Checks the new write against every recorded read and against the last
 *  write, then (race or not) resets the record: readers are cleared and the
 *  writer fields are set to this thread/clock with the atomic flag cleared.
 *  Returns the first race found, or NULL. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actuall be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */
	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* This write supersedes all prior accesses: reset the record to it. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
268
/** This function does race detection on a (non-atomic) write.  Dispatches to
 *  the expanded-record path when needed (record already expanded, or the
 *  thread id/clock overflow the compact encoding); otherwise checks the
 *  compact shadow word against the last read and last write, then replaces
 *  it with a record of this write.  Any detected race is backtraced and, if
 *  new, reported. */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write (readers cleared, atomic bit cleared). */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Capture the stack here (where the racing access happened), then
		 * report only races not seen before. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
336
/** This function does race detection for an ATOMIC write on an expanded
 *  record.  If the previous write was atomic there is nothing to race with;
 *  otherwise checks against all recorded reads and the last write.  Either
 *  way the record is reset to this write with the atomic flag SET.
 *  Returns the first race found, or NULL. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Atomic accesses cannot race with a prior atomic write. */
	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actuall be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Reset the record to this atomic write. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
382
/** This function does race detection on an ATOMIC write.  Mirrors
 *  raceCheckWrite() but skips the race checks when the previous compact
 *  record is flagged atomic, and records this write with the atomic bit set. */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write and mark it atomic. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Capture the stack and report only previously-unseen races. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
454
/** Records an ATOMIC write in an expanded record WITHOUT doing any race
 *  detection: clears the readers and sets the writer fields to this
 *  thread/clock with the atomic flag set.  @a location is unused here. */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 1;
}
464
/** Records a NON-atomic write in an expanded record WITHOUT doing any race
 *  detection: clears the readers and sets the writer fields to this
 *  thread/clock with the atomic flag cleared.  @a location is unused here. */
void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 0;
}
474
/** This function just updates metadata on atomic write: records the write in
 *  whichever record form the address uses (expanding the compact form if the
 *  thread id/clock overflow it) without performing race detection. */
void recordWrite(thread_id_t thread, void *location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		fullRecordWrite(thread, location, shadow, currClock);
		return;
	}

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		fullRecordWrite(thread, location, shadow, currClock);
		return;
	}

	/* Compact record: writer thread/clock, readers cleared, atomic bit set. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
}
498
499 /** This function just updates metadata on atomic write. */
500 void recordCalloc(void *location, size_t size) {
501         thread_id_t thread = thread_current()->get_id();
502         for(;size != 0;size--) {
503                 uint64_t *shadow = lookupAddressEntry(location);
504                 uint64_t shadowval = *shadow;
505                 ClockVector *currClock = get_execution()->get_cv(thread);
506                 /* Do full record */
507                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
508                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
509                         return;
510                 }
511
512                 int threadid = id_to_int(thread);
513                 modelclock_t ourClock = currClock->getClock(thread);
514
515                 /* Thread ID is too large or clock is too large. */
516                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
517                         expandRecord(shadow);
518                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
519                         return;
520                 }
521
522                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
523                 location = (void *)(((char *) location) + 1);
524         }
525 }
526
/** This function does race detection on a read for an expanded record.
 *  Checks the read against the last write, compacts the reader list by
 *  dropping reads this one subsumes, grows the reader arrays when a
 *  capacity boundary is hit, and appends this read.  Returns the race
 *  found, or NULL. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
			}
			copytoindex++;
		}
	}

	/* Capacity grows by doubling from INITCAPACITY, so a boundary can only
	 * be hit when the surviving count is 0 or a power of two (popcount<=1). */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0 && record->thread == NULL) {
			/* First reader for this record: allocate the arrays lazily. */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Arrays are full at this power-of-two count: double them. */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	modelclock_t ourClock = currClock->getClock(thread);

	/* Append this read to the (possibly compacted) reader list. */
	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
593
/** This function does race detection on a read.  Uses the expanded-record
 *  path when needed; otherwise checks the compact word against the last
 *  write, expands the record if the previously-recorded read is not subsumed
 *  by this one, and finally re-encodes the word with this read. */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			/* NOTE: this goto targets the label immediately below, so control
			 * falls through to the read-subsumption check either way. */
			goto ShadowExit;
		}

ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		/* Record this read alongside the existing write, preserving the atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Capture the stack and report only previously-unseen races. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
660
661
662 /** This function does race detection on a read for an expanded record. */
663 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
664 {
665         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
666         struct DataRace * race = NULL;
667         /* Check for datarace against last write. */
668         if (record->isAtomic)
669                 return NULL;
670
671         modelclock_t writeClock = record->writeClock;
672         thread_id_t writeThread = record->writeThread;
673
674         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
675                 /* We have a datarace */
676                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
677         }
678         return race;
679 }
680
681 /** This function does race detection on a read. */
/** This function does race detection on an ATOMIC read.  Only a prior
 *  non-atomic write can race with it; the shadow record is not updated. */
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	/* Last write was atomic: an atomic read cannot race with it. */
	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}


	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Capture the stack and report only previously-unseen races. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
726
/**
 * Race-checks the first byte of a plain (non-atomic) read.
 *
 * Reports the shadow-word transition through *old_val / *new_val so the
 * wide-access callers (raceCheckRead16/32/64) can fast-path neighboring
 * bytes: *old_val receives the shadow word observed before the update and
 * *new_val the word written back.  On any early-exit path (no clock vector,
 * expanded record, record expansion) the outputs are left untouched, which
 * keeps them at the caller's INVALIDSHADOWVAL sentinel and disables the
 * fast path.  Returns the shadow-table entry for @location.
 */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			/* Fields no longer fit in the packed short record; switch to
			 * the expanded representation and redo the check there. */
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Record this read in the short shadow word, preserving the last
		 * write's identity and the atomic flag bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

		/* Expose the before/after values for the caller's fast path. */
		*old_val = shadowval;
		*new_val = *shadow;
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Attach a backtrace; free the race if the set already holds an
		 * equivalent one (add() presumably returns false then -- confirm). */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}

	return shadow;
}
797
/**
 * Race-checks one non-leading byte of a multi-byte plain read.
 *
 * Identical to raceCheckRead_firstIt except that it does not report the
 * before/after shadow values -- the caller's fast path has already failed
 * for this byte, so each remaining byte is checked individually.
 */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			/* Short record cannot hold these fields; expand and recheck. */
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Record this read, keeping last-write info and the atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Backtrace plus dedup via raceset, as in the other checkers. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
863
864 void raceCheckRead64(thread_id_t thread, const void *location)
865 {
866         uint64_t old_shadowval, new_shadowval;
867         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
868 #ifdef COLLECT_STAT
869         load64_count++;
870 #endif
871         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
872         if (CHECKBOUNDARY(location, 7)) {
873                 if (shadow[1]==old_shadowval)
874                         shadow[1] = new_shadowval;
875                 else goto L1;
876                 if (shadow[2]==old_shadowval)
877                         shadow[2] = new_shadowval;
878                 else goto L2;
879                 if (shadow[3]==old_shadowval)
880                         shadow[3] = new_shadowval;
881                 else goto L3;
882                 if (shadow[4]==old_shadowval)
883                         shadow[4] = new_shadowval;
884                 else goto L4;
885                 if (shadow[5]==old_shadowval)
886                         shadow[5] = new_shadowval;
887                 else goto L5;
888                 if (shadow[6]==old_shadowval)
889                         shadow[6] = new_shadowval;
890                 else goto L6;
891                 if (shadow[7]==old_shadowval)
892                         shadow[7] = new_shadowval;
893                 else goto L7;
894                 return;
895         }
896
897 L1:
898         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
899 L2:
900         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
901 L3:
902         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
903 L4:
904         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
905 L5:
906         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
907 L6:
908         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
909 L7:
910         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
911 }
912
913 void raceCheckRead32(thread_id_t thread, const void *location)
914 {
915         uint64_t old_shadowval, new_shadowval;
916         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
917 #ifdef COLLECT_STAT
918         load32_count++;
919 #endif
920         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
921         if (CHECKBOUNDARY(location, 3)) {
922                 if (shadow[1]==old_shadowval)
923                         shadow[1] = new_shadowval;
924                 else goto L1;
925                 if (shadow[2]==old_shadowval)
926                         shadow[2] = new_shadowval;
927                 else goto L2;
928                 if (shadow[3]==old_shadowval)
929                         shadow[3] = new_shadowval;
930                 else goto L3;
931                 return;
932         }
933
934 L1:
935         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
936 L2:
937         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
938 L3:
939         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
940 }
941
942 void raceCheckRead16(thread_id_t thread, const void *location)
943 {
944         uint64_t old_shadowval, new_shadowval;
945         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
946 #ifdef COLLECT_STAT
947         load16_count++;
948 #endif
949         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
950         if (CHECKBOUNDARY(location, 1)) {
951                 if (shadow[1]==old_shadowval) {
952                         shadow[1] = new_shadowval;
953                         return;
954                 }
955         }
956         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
957 }
958
959 void raceCheckRead8(thread_id_t thread, const void *location)
960 {
961         uint64_t old_shadowval, new_shadowval;
962         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
963 #ifdef COLLECT_STAT
964         load8_count++;
965 #endif
966         raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
967 }
968
/**
 * Race-checks the first byte of a plain (non-atomic) write.
 *
 * Like raceCheckRead_firstIt, it reports the shadow-word transition via
 * *old_val / *new_val so wide-access callers can fast-path neighboring
 * bytes; on early-exit paths the outputs keep the caller's
 * INVALIDSHADOWVAL sentinel.  Returns the shadow entry for @location.
 */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			/* Fields no longer fit in the short record; expand and recheck. */
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				/* Only the first race found is reported; the last-write
				 * check below is skipped in that case. */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		/* The shadow word is rewritten even when a race was detected: the
		 * new write supersedes the recorded read vector.  NOTE(review):
		 * unlike the read path, the ATOMICMASK bit is not preserved here --
		 * presumably a plain write demotes the location; confirm. */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);

		/* Expose the before/after values for the caller's fast path. */
		*old_val = shadowval;
		*new_val = *shadow;
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Attach a backtrace; dedup through raceset, freeing duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}

	return shadow;
}
1040
/**
 * Race-checks one non-leading byte of a multi-byte plain write.
 *
 * Identical to raceCheckWrite_firstIt except that it does not report the
 * before/after shadow values -- it is invoked only after the caller's
 * fast path failed for this byte.
 */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			/* Short record cannot hold these fields; expand and recheck. */
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		/* Record the write (even on a race); the read vector is cleared. */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Backtrace plus dedup via raceset, as in the other checkers. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
1108
1109 void raceCheckWrite64(thread_id_t thread, const void *location)
1110 {
1111         uint64_t old_shadowval, new_shadowval;
1112         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1113 #ifdef COLLECT_STAT
1114         store64_count++;
1115 #endif
1116         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1117         if (CHECKBOUNDARY(location, 7)) {
1118                 if (shadow[1]==old_shadowval)
1119                         shadow[1] = new_shadowval;
1120                 else goto L1;
1121                 if (shadow[2]==old_shadowval)
1122                         shadow[2] = new_shadowval;
1123                 else goto L2;
1124                 if (shadow[3]==old_shadowval)
1125                         shadow[3] = new_shadowval;
1126                 else goto L3;
1127                 if (shadow[4]==old_shadowval)
1128                         shadow[4] = new_shadowval;
1129                 else goto L4;
1130                 if (shadow[5]==old_shadowval)
1131                         shadow[5] = new_shadowval;
1132                 else goto L5;
1133                 if (shadow[6]==old_shadowval)
1134                         shadow[6] = new_shadowval;
1135                 else goto L6;
1136                 if (shadow[7]==old_shadowval)
1137                         shadow[7] = new_shadowval;
1138                 else goto L7;
1139                 return;
1140         }
1141
1142 L1:
1143         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1144 L2:
1145         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1146 L3:
1147         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1148 L4:
1149         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1150 L5:
1151         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1152 L6:
1153         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1154 L7:
1155         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1156 }
1157
1158 void raceCheckWrite32(thread_id_t thread, const void *location)
1159 {
1160         uint64_t old_shadowval, new_shadowval;
1161         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1162 #ifdef COLLECT_STAT
1163         store32_count++;
1164 #endif
1165         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1166         if (CHECKBOUNDARY(location, 3)) {
1167                 if (shadow[1]==old_shadowval)
1168                         shadow[1] = new_shadowval;
1169                 else goto L1;
1170                 if (shadow[2]==old_shadowval)
1171                         shadow[2] = new_shadowval;
1172                 else goto L2;
1173                 if (shadow[3]==old_shadowval)
1174                         shadow[3] = new_shadowval;
1175                 else goto L3;
1176                 return;
1177         }
1178
1179 L1:
1180         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1181 L2:
1182         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1183 L3:
1184         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1185 }
1186
1187 void raceCheckWrite16(thread_id_t thread, const void *location)
1188 {
1189         uint64_t old_shadowval, new_shadowval;
1190         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1191 #ifdef COLLECT_STAT
1192         store16_count++;
1193 #endif
1194
1195         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1196         if (CHECKBOUNDARY(location, 1)) {
1197                 if (shadow[1]==old_shadowval) {
1198                         shadow[1] = new_shadowval;
1199                         return;
1200                 }
1201         }
1202         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1203 }
1204
1205 void raceCheckWrite8(thread_id_t thread, const void *location)
1206 {
1207         uint64_t old_shadowval, new_shadowval;
1208         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1209 #ifdef COLLECT_STAT
1210         store8_count++;
1211 #endif
1212         raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1213 }
1214
1215 void print_normal_accesses()
1216 {
1217         model_print("store 8  count: %u\n", store8_count);
1218         model_print("store 16 count: %u\n", store16_count);
1219         model_print("store 32 count: %u\n", store32_count);
1220         model_print("store 64 count: %u\n", store64_count);
1221
1222         model_print("load  8  count: %u\n", load8_count);
1223         model_print("load  16 count: %u\n", load16_count);
1224         model_print("load  32 count: %u\n", load32_count);
1225         model_print("load  64 count: %u\n", load64_count);
1226 }