Add back model_thread; it is still needed
[c11tester.git] / datarace.cc
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
/* Root of the multi-level shadow table mapping application addresses to
 * race-detection metadata (see lookupAddressEntry()). */
static struct ShadowTable *root;
/* Bump-allocator pool carved up by table_calloc(); [memory_base, memory_top)
 * is preallocated in initRaceDetector(). */
static void *memory_base;
static void *memory_top;
/* Set of races reported so far, used to deduplicate by backtrace. */
static RaceSet * raceset;

#ifdef COLLECT_STAT
/* Per-width store/load counters, compiled in only for statistics builds. */
static unsigned int store8_count = 0;
static unsigned int store16_count = 0;
static unsigned int store32_count = 0;
static unsigned int store64_count = 0;

static unsigned int load8_count = 0;
static unsigned int load16_count = 0;
static unsigned int load32_count = 0;
static unsigned int load64_count = 0;
#endif
30
31 static const ModelExecution * get_execution()
32 {
33         return model->get_execution();
34 }
35
/** This function initializes the data race detector: it allocates the
 *  shadow-table root, reserves a pool of SHADOWBASETABLES base tables for
 *  table_calloc() to hand out, and creates the race-deduplication set. */
void initRaceDetector()
{
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	raceset = new RaceSet();
}
44
45 void * table_calloc(size_t size)
46 {
47         if ((((char *)memory_base) + size) > memory_top) {
48                 return snapshot_calloc(size, 1);
49         } else {
50                 void *tmp = memory_base;
51                 memory_base = ((char *)memory_base) + size;
52                 return tmp;
53         }
54 }
55
/** This function looks up the entry in the shadow table corresponding to a
 * given address, lazily allocating intermediate tables on first touch.
 * With BIT48, bits 47..32 of the address index an extra ShadowTable level;
 * bits 31..16 select the ShadowBaseTable and the low 16 bits select the
 * 64-bit shadow word within it. */
static inline uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		/* First touch of this 4GB region: allocate the mid-level table */
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		/* First touch of this 64KB page: allocate its base table */
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
74
75
76 bool hasNonAtomicStore(const void *address) {
77         uint64_t * shadow = lookupAddressEntry(address);
78         uint64_t shadowval = *shadow;
79         if (ISSHORTRECORD(shadowval)) {
80                 //Do we have a non atomic write with a non-zero clock
81                 return !(ATOMICMASK & shadowval);
82         } else {
83                 if (shadowval == 0)
84                         return true;
85                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
86                 return !record->isAtomic;
87         }
88 }
89
90 void setAtomicStoreFlag(const void *address) {
91         uint64_t * shadow = lookupAddressEntry(address);
92         uint64_t shadowval = *shadow;
93         if (ISSHORTRECORD(shadowval)) {
94                 *shadow = shadowval | ATOMICMASK;
95         } else {
96                 if (shadowval == 0) {
97                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
98                         return;
99                 }
100                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
101                 record->isAtomic = 1;
102         }
103 }
104
105 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
106         uint64_t * shadow = lookupAddressEntry(address);
107         uint64_t shadowval = *shadow;
108         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
109                 //Do we have a non atomic write with a non-zero clock
110                 *thread = WRTHREADID(shadowval);
111                 *clock = WRITEVECTOR(shadowval);
112         } else {
113                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
114                 *thread = record->writeThread;
115                 *clock = record->writeClock;
116         }
117 }
118
119 /**
120  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
121  * to check the potential for a data race.
122  * @param clock1 The current clock vector
123  * @param tid1 The current thread; paired with clock1
124  * @param clock2 The clock value for the potentially-racing action
125  * @param tid2 The thread ID for the potentially-racing action
126  * @return true if the current clock allows a race with the event at clock2/tid2
127  */
128 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
129                                                                                                          modelclock_t clock2, thread_id_t tid2)
130 {
131         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
132 }
133
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps.
 *
 * Decodes the fields packed into *shadow, copies them into a freshly
 * allocated RaceRecord, and overwrites the shadow word with the pointer to
 * that record.  The read arrays are allocated only when the short record
 * held a live read (readClock != 0); the ATOMICMASK bit carries over into
 * isAtomic. */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	if (readClock != 0) {
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	} else {
		record->thread = NULL;
	}
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	*shadow = (uint64_t) record;
}
165
166 #define FIRST_STACK_FRAME 2
167
168 unsigned int race_hash(struct DataRace *race) {
169         unsigned int hash = 0;
170         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
171                 hash ^= ((uintptr_t)race->backtrace[i]);
172                 hash = (hash >> 3) | (hash << 29);
173         }
174         return hash;
175 }
176
177 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
178         if (r1->numframes != r2->numframes)
179                 return false;
180         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
181                 if (r1->backtrace[i] != r2->backtrace[i])
182                         return false;
183         }
184         return true;
185 }
186
/** This function is called when we detect a data race.  It allocates and
 *  fills a DataRace descriptor with the two racing accesses; the backtrace
 *  and numframes fields are filled in later by the caller.  Returns NULL
 *  when race reporting is compiled out (REPORT_DATA_RACES undefined). */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
#ifdef REPORT_DATA_RACES
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
	return race;
#else
	return NULL;
#endif
}
203
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	/* Note: for "Access 2" the new action's sequence number is printed in
	 * the clock column. */
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
228
/** This function does race detection for a write on an expanded record.
 *  Checks the write against every recorded read and the last write, then
 *  unconditionally re-records the write (clearing all reads and the atomic
 *  flag).  Returns a DataRace descriptor (backtrace not yet filled in), or
 *  NULL if no race was found. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */
	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Record this (non-atomic) write as the latest access, discarding reads */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
270
/** This function does race detection on a (non-atomic) write.  It checks the
 *  shadow entry for races against the last read and last write, updates the
 *  shadow metadata to record this write, and reports any new race found. */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check against */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write as the latest access (clears reads and the
		 * atomic bit in the compact encoding) */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Only report each distinct backtrace once */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
338
/** This function does race detection for an atomic write on an expanded
 *  record.  If the previous write was atomic, no check is needed; otherwise
 *  it checks against all recorded reads and the last write.  It then records
 *  this write as atomic, discarding the read list. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Atomic accesses cannot race with each other */
	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Record this atomic write as the latest access, discarding reads */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
384
/** This function does race detection on an atomic write.  An atomic write
 *  can only race with earlier non-atomic accesses; the shadow entry is
 *  updated to record this write with the atomic bit set. */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check against */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write with the atomic bit set */
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Only report each distinct backtrace once */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
456
457 /** This function does race detection for a write on an expanded record. */
458 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
459         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
460         record->numReads = 0;
461         record->writeThread = thread;
462         modelclock_t ourClock = currClock->getClock(thread);
463         record->writeClock = ourClock;
464         record->isAtomic = 1;
465 }
466
467 /** This function does race detection for a write on an expanded record. */
468 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
469         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
470         record->numReads = 0;
471         record->writeThread = thread;
472         modelclock_t ourClock = currClock->getClock(thread);
473         record->writeClock = ourClock;
474         record->isAtomic = 0;
475 }
476
477 /** This function just updates metadata on atomic write. */
478 void recordWrite(thread_id_t thread, void *location) {
479         uint64_t *shadow = lookupAddressEntry(location);
480         uint64_t shadowval = *shadow;
481         ClockVector *currClock = get_execution()->get_cv(thread);
482         /* Do full record */
483         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
484                 fullRecordWrite(thread, location, shadow, currClock);
485                 return;
486         }
487
488         int threadid = id_to_int(thread);
489         modelclock_t ourClock = currClock->getClock(thread);
490
491         /* Thread ID is too large or clock is too large. */
492         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
493                 expandRecord(shadow);
494                 fullRecordWrite(thread, location, shadow, currClock);
495                 return;
496         }
497
498         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
499 }
500
501 /** This function just updates metadata on atomic write. */
502 void recordCalloc(void *location, size_t size) {
503         thread_id_t thread = thread_current_id();
504         for(;size != 0;size--) {
505                 uint64_t *shadow = lookupAddressEntry(location);
506                 uint64_t shadowval = *shadow;
507                 ClockVector *currClock = get_execution()->get_cv(thread);
508                 /* Do full record */
509                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
510                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
511                         return;
512                 }
513
514                 int threadid = id_to_int(thread);
515                 modelclock_t ourClock = currClock->getClock(thread);
516
517                 /* Thread ID is too large or clock is too large. */
518                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
519                         expandRecord(shadow);
520                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
521                         return;
522                 }
523
524                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
525                 location = (void *)(((char *) location) + 1);
526         }
527 }
528
/** This function does race detection on a read for an expanded record.
 *  It checks the read against the last write, compacts the read list down to
 *  the reads that are still unordered with the current thread, grows the
 *  read arrays when needed, and appends this read. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(readThread >= 0);
				record->readClock[copytoindex] = readClock;
				record->thread[copytoindex] = readThread;
			}
			copytoindex++;
		}
	}

	/* popcount(copytoindex) <= 1 means copytoindex is 0 or a power of two —
	 * the only counts at which the arrays can be exactly full, since
	 * capacities start at INITCAPACITY and always double.
	 * NOTE(review): this relies on INITCAPACITY being a power of two —
	 * confirm against its definition in the header. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0 && record->thread == NULL) {
			/* First read on this record: allocate initial arrays */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Arrays are full: double the capacity and copy over */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			real_memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			real_memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	modelclock_t ourClock = currClock->getClock(thread);

	/* Append the current read */
	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
595
/** This function does race detection on a read.  It checks the read against
 *  the last write, verifies that this read subsumes the previously recorded
 *  read (expanding the record when it does not), and records the read in the
 *  shadow entry. */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check against */

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

		/* The goto above lands immediately below: even when a write race was
		 * found, we still check whether this read subsumes the recorded one */
ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		/* Replace the recorded read with ours, keep the write and atomic bit */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Only report each distinct backtrace once */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
662
663
664 /** This function does race detection on a read for an expanded record. */
665 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
666 {
667         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
668         struct DataRace * race = NULL;
669         /* Check for datarace against last write. */
670         if (record->isAtomic)
671                 return NULL;
672
673         modelclock_t writeClock = record->writeClock;
674         thread_id_t writeThread = record->writeThread;
675
676         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
677                 /* We have a datarace */
678                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
679         }
680         return race;
681 }
682
/** This function does race detection on an atomic read.  An atomic read can
 *  only race with a preceding non-atomic write; the read is not recorded in
 *  the shadow entry. */
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check against */

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	/* Last store was atomic: no race possible, nothing to clean up */
	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}


	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Only report each distinct backtrace once */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
728
/**
 * Performs read race detection for the first byte of a plain (non-atomic)
 * load, and reports the resulting shadow-word transition so the caller can
 * cheaply replicate it across the remaining bytes of a wider access.
 *
 * @param thread   Thread performing the read.
 * @param location Address of the first byte read.
 * @param old_val  Out: shadow word observed before the update.  Only written
 *                 on the short-record fast path; left untouched (caller's
 *                 sentinel) when the record is full, overflows, or must be
 *                 expanded to keep this read.
 * @param new_val  Out: shadow word written by the update; same caveat.
 * @return Pointer to the shadow entry for location.  Callers index
 *         shadow[1], shadow[2], ... for the following bytes when
 *         CHECKBOUNDARY permits.
 */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	/* No clock vector yet for this thread: nothing to race-check against. */
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Record ourselves as the last reader, preserving the atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

		/* Publish the transition for the caller's bulk fast path. */
		*old_val = shadowval;
		*new_val = *shadow;
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}

	return shadow;
}
799
/**
 * Performs read race detection for one trailing byte of a wider plain load
 * (the bytes after the first; see raceCheckRead_firstIt).  The detection
 * logic is identical to the first-byte variant, but no shadow transition is
 * reported back, since each trailing byte is handled individually.
 *
 * @param thread   Thread performing the read.
 * @param location Address of the byte being checked.
 */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	/* No clock vector yet for this thread: nothing to race-check against. */
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Record ourselves as the last reader, preserving the atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
865
866 void raceCheckRead64(thread_id_t thread, const void *location)
867 {
868         int old_flag = GET_MODEL_FLAG;
869         ENTER_MODEL_FLAG;
870
871         uint64_t old_shadowval, new_shadowval;
872         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
873 #ifdef COLLECT_STAT
874         load64_count++;
875 #endif
876         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
877         if (CHECKBOUNDARY(location, 7)) {
878                 if (shadow[1]==old_shadowval)
879                         shadow[1] = new_shadowval;
880                 else goto L1;
881                 if (shadow[2]==old_shadowval)
882                         shadow[2] = new_shadowval;
883                 else goto L2;
884                 if (shadow[3]==old_shadowval)
885                         shadow[3] = new_shadowval;
886                 else goto L3;
887                 if (shadow[4]==old_shadowval)
888                         shadow[4] = new_shadowval;
889                 else goto L4;
890                 if (shadow[5]==old_shadowval)
891                         shadow[5] = new_shadowval;
892                 else goto L5;
893                 if (shadow[6]==old_shadowval)
894                         shadow[6] = new_shadowval;
895                 else goto L6;
896                 if (shadow[7]==old_shadowval)
897                         shadow[7] = new_shadowval;
898                 else goto L7;
899                 RESTORE_MODEL_FLAG(old_flag);
900                 return;
901         }
902
903 L1:
904         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
905 L2:
906         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
907 L3:
908         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
909 L4:
910         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
911 L5:
912         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
913 L6:
914         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
915 L7:
916         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
917         RESTORE_MODEL_FLAG(old_flag);
918 }
919
920 void raceCheckRead32(thread_id_t thread, const void *location)
921 {
922         int old_flag = GET_MODEL_FLAG;
923         ENTER_MODEL_FLAG;
924
925         uint64_t old_shadowval, new_shadowval;
926         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
927 #ifdef COLLECT_STAT
928         load32_count++;
929 #endif
930         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
931         if (CHECKBOUNDARY(location, 3)) {
932                 if (shadow[1]==old_shadowval)
933                         shadow[1] = new_shadowval;
934                 else goto L1;
935                 if (shadow[2]==old_shadowval)
936                         shadow[2] = new_shadowval;
937                 else goto L2;
938                 if (shadow[3]==old_shadowval)
939                         shadow[3] = new_shadowval;
940                 else goto L3;
941                 RESTORE_MODEL_FLAG(old_flag);
942                 return;
943         }
944
945 L1:
946         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
947 L2:
948         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
949 L3:
950         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
951         RESTORE_MODEL_FLAG(old_flag);
952 }
953
954 void raceCheckRead16(thread_id_t thread, const void *location)
955 {
956         int old_flag = GET_MODEL_FLAG;
957         ENTER_MODEL_FLAG;
958
959         uint64_t old_shadowval, new_shadowval;
960         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
961 #ifdef COLLECT_STAT
962         load16_count++;
963 #endif
964         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
965         if (CHECKBOUNDARY(location, 1)) {
966                 if (shadow[1]==old_shadowval) {
967                         shadow[1] = new_shadowval;
968                         RESTORE_MODEL_FLAG(old_flag);
969                         return;
970                 }
971         }
972         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
973         RESTORE_MODEL_FLAG(old_flag);
974 }
975
976 void raceCheckRead8(thread_id_t thread, const void *location)
977 {
978         int old_flag = GET_MODEL_FLAG;
979         ENTER_MODEL_FLAG;
980
981         uint64_t old_shadowval, new_shadowval;
982         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
983 #ifdef COLLECT_STAT
984         load8_count++;
985 #endif
986         raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
987         RESTORE_MODEL_FLAG(old_flag);
988 }
989
/**
 * Performs write race detection for the first byte of a plain (non-atomic)
 * store, and reports the resulting shadow-word transition so the caller can
 * cheaply replicate it across the remaining bytes of a wider access.
 *
 * @param thread   Thread performing the write.
 * @param location Address of the first byte written.
 * @param old_val  Out: shadow word observed before the update.  Only written
 *                 on the short-record path (including when a race is found
 *                 there); left untouched when the record is full or overflows.
 * @param new_val  Out: shadow word written by the update; same caveat.
 * @return Pointer to the shadow entry for location.  Callers index
 *         shadow[1], shadow[2], ... for the following bytes when
 *         CHECKBOUNDARY permits.
 */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	/* No clock vector yet for this thread: nothing to race-check against. */
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		/* Reached even when a race was found above: the write still becomes
		 * the new last access.  The read vector is cleared and, unlike the
		 * read path, the atomic bit is not carried over. */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);

		/* Publish the transition for the caller's bulk fast path. */
		*old_val = shadowval;
		*new_val = *shadow;
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}

	return shadow;
}
1061
/**
 * Performs write race detection for one trailing byte of a wider plain
 * store (the bytes after the first; see raceCheckWrite_firstIt).  The
 * detection logic is identical to the first-byte variant, but no shadow
 * transition is reported back, since each trailing byte is handled
 * individually.
 *
 * @param thread   Thread performing the write.
 * @param location Address of the byte being checked.
 */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	/* No clock vector yet for this thread: nothing to race-check against. */
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		/* Reached even when a race was found above: the write still becomes
		 * the new last access, clearing the read vector and atomic bit. */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
1129
1130 void raceCheckWrite64(thread_id_t thread, const void *location)
1131 {
1132         int old_flag = GET_MODEL_FLAG;
1133         ENTER_MODEL_FLAG;
1134         uint64_t old_shadowval, new_shadowval;
1135         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1136 #ifdef COLLECT_STAT
1137         store64_count++;
1138 #endif
1139         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1140         if (CHECKBOUNDARY(location, 7)) {
1141                 if (shadow[1]==old_shadowval)
1142                         shadow[1] = new_shadowval;
1143                 else goto L1;
1144                 if (shadow[2]==old_shadowval)
1145                         shadow[2] = new_shadowval;
1146                 else goto L2;
1147                 if (shadow[3]==old_shadowval)
1148                         shadow[3] = new_shadowval;
1149                 else goto L3;
1150                 if (shadow[4]==old_shadowval)
1151                         shadow[4] = new_shadowval;
1152                 else goto L4;
1153                 if (shadow[5]==old_shadowval)
1154                         shadow[5] = new_shadowval;
1155                 else goto L5;
1156                 if (shadow[6]==old_shadowval)
1157                         shadow[6] = new_shadowval;
1158                 else goto L6;
1159                 if (shadow[7]==old_shadowval)
1160                         shadow[7] = new_shadowval;
1161                 else goto L7;
1162                 RESTORE_MODEL_FLAG(old_flag);
1163                 return;
1164         }
1165
1166 L1:
1167         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1168 L2:
1169         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1170 L3:
1171         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1172 L4:
1173         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1174 L5:
1175         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1176 L6:
1177         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1178 L7:
1179         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1180         RESTORE_MODEL_FLAG(old_flag);
1181 }
1182
1183 void raceCheckWrite32(thread_id_t thread, const void *location)
1184 {
1185         int old_flag = GET_MODEL_FLAG;
1186         ENTER_MODEL_FLAG;
1187
1188         uint64_t old_shadowval, new_shadowval;
1189         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1190 #ifdef COLLECT_STAT
1191         store32_count++;
1192 #endif
1193         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1194         if (CHECKBOUNDARY(location, 3)) {
1195                 if (shadow[1]==old_shadowval)
1196                         shadow[1] = new_shadowval;
1197                 else goto L1;
1198                 if (shadow[2]==old_shadowval)
1199                         shadow[2] = new_shadowval;
1200                 else goto L2;
1201                 if (shadow[3]==old_shadowval)
1202                         shadow[3] = new_shadowval;
1203                 else goto L3;
1204                 RESTORE_MODEL_FLAG(old_flag);
1205                 return;
1206         }
1207
1208 L1:
1209         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1210 L2:
1211         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1212 L3:
1213         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1214         RESTORE_MODEL_FLAG(old_flag);
1215 }
1216
1217 void raceCheckWrite16(thread_id_t thread, const void *location)
1218 {
1219         int old_flag = GET_MODEL_FLAG;
1220         ENTER_MODEL_FLAG;
1221
1222         uint64_t old_shadowval, new_shadowval;
1223         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1224 #ifdef COLLECT_STAT
1225         store16_count++;
1226 #endif
1227
1228         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1229         if (CHECKBOUNDARY(location, 1)) {
1230                 if (shadow[1]==old_shadowval) {
1231                         shadow[1] = new_shadowval;
1232                         RESTORE_MODEL_FLAG(old_flag);
1233                         return;
1234                 }
1235         }
1236         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1237         RESTORE_MODEL_FLAG(old_flag);
1238 }
1239
1240 void raceCheckWrite8(thread_id_t thread, const void *location)
1241 {
1242         int old_flag = GET_MODEL_FLAG;
1243         ENTER_MODEL_FLAG;
1244
1245         uint64_t old_shadowval, new_shadowval;
1246         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1247 #ifdef COLLECT_STAT
1248         store8_count++;
1249 #endif
1250         raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1251         RESTORE_MODEL_FLAG(old_flag);
1252 }
1253
#ifdef COLLECT_STAT
/** Prints the per-width counters of instrumented plain loads and stores. */
void print_normal_accesses()
{
	static const char *const labels[] = {
		"store 8  count", "store 16 count", "store 32 count", "store 64 count",
		"load  8  count", "load  16 count", "load  32 count", "load  64 count"
	};
	const unsigned int counts[] = {
		store8_count, store16_count, store32_count, store64_count,
		load8_count, load16_count, load32_count, load64_count
	};
	for (unsigned int i = 0;	i < sizeof(counts) / sizeof(counts[0]);	i++)
		model_print("%s: %u\n", labels[i], counts[i]);
}
#endif