[c11tester.git] / datarace.cc (commit: Count some statistics)
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
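/* Per-width access counters; they are only incremented when COLLECT_STAT is defined. */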
19 static unsigned int store8_count = 0;
20 static unsigned int store16_count = 0;
21 static unsigned int store32_count = 0;
22 static unsigned int store64_count = 0;
23
24 static unsigned int load8_count = 0;
25 static unsigned int load16_count = 0;
26 static unsigned int load32_count = 0;
27 static unsigned int load64_count = 0;
28
29 static const ModelExecution * get_execution()
30 {
31         return model->get_execution();
32 }
33
34 /** This function initializes the data race detector. */
35 void initRaceDetector()
36 {
37         root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
38         memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
39         memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
40         raceset = new RaceSet();
41 }
42
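/** Bump-allocates shadow-table memory from the region reserved in initRaceDetector();
 *  falls back to snapshot_calloc once that region is exhausted. */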
43 void * table_calloc(size_t size)
44 {
45         if ((((char *)memory_base) + size) > memory_top) {
46                 return snapshot_calloc(size, 1);
47         } else {
48                 void *tmp = memory_base;
49                 memory_base = ((char *)memory_base) + size;
50                 return tmp;
51         }
52 }
53
54 /** This function looks up the entry in the shadow table corresponding to a
55  * given address.*/
56 static uint64_t * lookupAddressEntry(const void *address)
57 {
58         struct ShadowTable *currtable = root;
59 #if BIT48
60         currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
61         if (currtable == NULL) {
62                 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
63         }
64 #endif
65
66         struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
67         if (basetable == NULL) {
68                 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
69         }
70         return &basetable->array[((uintptr_t)address) & MASK16BIT];
71 }
72
73
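/** Returns true if the shadow entry for this address does not record an atomic store,
 *  i.e. the last store (if any) was non-atomic. */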
74 bool hasNonAtomicStore(const void *address) {
75         uint64_t * shadow = lookupAddressEntry(address);
76         uint64_t shadowval = *shadow;
77         if (ISSHORTRECORD(shadowval)) {
78                 // Do we have a non-atomic store recorded here?
79                 return !(ATOMICMASK & shadowval);
80         } else {
81                 if (shadowval == 0)
82                         return true;
83                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
84                 return !record->isAtomic;
85         }
86 }
87
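/** Sets the atomic flag on the shadow entry for this address. */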
88 void setAtomicStoreFlag(const void *address) {
89         uint64_t * shadow = lookupAddressEntry(address);
90         uint64_t shadowval = *shadow;
91         if (ISSHORTRECORD(shadowval)) {
92                 *shadow = shadowval | ATOMICMASK;
93         } else {
94                 if (shadowval == 0) {
95                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
96                         return;
97                 }
98                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
99                 record->isAtomic = 1;
100         }
101 }
102
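/** Reports the thread and clock of the last store recorded for this address. */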
103 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
104         uint64_t * shadow = lookupAddressEntry(address);
105         uint64_t shadowval = *shadow;
106         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
107                 // Short or empty record: decode the writer thread and clock directly from the shadow word.
108                 *thread = WRTHREADID(shadowval);
109                 *clock = WRITEVECTOR(shadowval);
110         } else {
111                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
112                 *thread = record->writeThread;
113                 *clock = record->writeClock;
114         }
115 }
116
117 /**
118  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
119  * to check the potential for a data race.
120  * @param clock1 The current clock vector
121  * @param tid1 The current thread; paired with clock1
122  * @param clock2 The clock value for the potentially-racing action
123  * @param tid2 The thread ID for the potentially-racing action
124  * @return true if the current clock allows a race with the event at clock2/tid2
125  */
126 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
127                                                                                                          modelclock_t clock2, thread_id_t tid2)
128 {
129         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
130 }
131
132 /**
133  * Expands a record from the compact form to the full form.  This is
134  * necessary for multiple readers or for very large thread ids or time
135  * stamps. */
136 static void expandRecord(uint64_t *shadow)
137 {
138         uint64_t shadowval = *shadow;
139
140         modelclock_t readClock = READVECTOR(shadowval);
141         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
142         modelclock_t writeClock = WRITEVECTOR(shadowval);
143         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
144
145         struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
146         record->writeThread = writeThread;
147         record->writeClock = writeClock;
148
149         if (readClock != 0) {
150                 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
151                 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
152                 record->numReads = 1;
153                 ASSERT(readThread >= 0);
154                 record->thread[0] = readThread;
155                 record->readClock[0] = readClock;
156         }
157         if (shadowval & ATOMICMASK)
158                 record->isAtomic = 1;
159         *shadow = (uint64_t) record;
160 }
161
162 #define FIRST_STACK_FRAME 2
163
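/* Hash a race by its backtrace, skipping the first FIRST_STACK_FRAME frames
   (presumably the race detector's own frames). */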
164 unsigned int race_hash(struct DataRace *race) {
165         unsigned int hash = 0;
166         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
167                 hash ^= ((uintptr_t)race->backtrace[i]);
168                 hash = (hash >> 3) | (hash << 29);
169         }
170         return hash;
171 }
172
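/* Two races are considered equal when their backtraces match frame-for-frame,
   again ignoring the first FIRST_STACK_FRAME frames. */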
173 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
174         if (r1->numframes != r2->numframes)
175                 return false;
176         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
177                 if (r1->backtrace[i] != r2->backtrace[i])
178                         return false;
179         }
180         return true;
181 }
182
183 /** This function is called when we detect a data race; it allocates and fills in a DataRace record (or returns NULL when race reporting is compiled out). */
184 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
185 {
186 #ifdef REPORT_DATA_RACES
187         struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
188         race->oldthread = oldthread;
189         race->oldclock = oldclock;
190         race->isoldwrite = isoldwrite;
191         race->newaction = newaction;
192         race->isnewwrite = isnewwrite;
193         race->address = address;
194         return race;
195 #else
196         return NULL;
197 #endif
198 }
199
200 /**
201  * @brief Assert a data race
202  *
203  * Reports a data race which is currently realized by printing the racing
204  * accesses and the backtrace at which the race was detected
205  *
206  * @param race The race to report
207  */
208 void assert_race(struct DataRace *race)
209 {
210         model_print("Race detected at location: \n");
211         backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
212         model_print("\nData race detected @ address %p:\n"
213                                                         "    Access 1: %5s in thread %2d @ clock %3u\n"
214                                                         "    Access 2: %5s in thread %2d @ clock %3u\n\n",
215                                                         race->address,
216                                                         race->isoldwrite ? "write" : "read",
217                                                         id_to_int(race->oldthread),
218                                                         race->oldclock,
219                                                         race->isnewwrite ? "write" : "read",
220                                                         id_to_int(race->newaction->get_tid()),
221                                                         race->newaction->get_seq_number()
222                                                         );
223 }
224
225 /** This function does race detection for a write on an expanded record. */
226 struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
227 {
228         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
229         struct DataRace * race = NULL;
230
231         /* Check for datarace against last read. */
232
233         for (int i = 0;i < record->numReads;i++) {
234                 modelclock_t readClock = record->readClock[i];
235                 thread_id_t readThread = record->thread[i];
236
237                 /* Note that readClock can't actually be zero here, so it could be
238                          optimized. */
239
240                 if (clock_may_race(currClock, thread, readClock, readThread)) {
241                         /* We have a datarace */
242                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
243                         goto Exit;
244                 }
245         }
246
247         /* Check for datarace against last write. */
248         {
249                 modelclock_t writeClock = record->writeClock;
250                 thread_id_t writeThread = record->writeThread;
251
252                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
253                         /* We have a datarace */
254                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
255                         goto Exit;
256                 }
257         }
258 Exit:
259         record->numReads = 0;
260         record->writeThread = thread;
261         record->isAtomic = 0;
262         modelclock_t ourClock = currClock->getClock(thread);
263         record->writeClock = ourClock;
264         return race;
265 }
266
267 /** This function does race detection on a write. */
268 void raceCheckWrite(thread_id_t thread, void *location)
269 {
270         uint64_t *shadow = lookupAddressEntry(location);
271         uint64_t shadowval = *shadow;
272         ClockVector *currClock = get_execution()->get_cv(thread);
273         if (currClock == NULL)
274                 return;
275
276         struct DataRace * race = NULL;
277         /* Do full record */
278         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
279                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
280                 goto Exit;
281         }
282
283         {
284                 int threadid = id_to_int(thread);
285                 modelclock_t ourClock = currClock->getClock(thread);
286
287                 /* Thread ID is too large or clock is too large. */
288                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
289                         expandRecord(shadow);
290                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
291                         goto Exit;
292                 }
293
294                 {
295                         /* Check for datarace against last read. */
296                         modelclock_t readClock = READVECTOR(shadowval);
297                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
298
299                         if (clock_may_race(currClock, thread, readClock, readThread)) {
300                                 /* We have a datarace */
301                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
302                                 goto ShadowExit;
303                         }
304                 }
305
306                 {
307                         /* Check for datarace against last write. */
308                         modelclock_t writeClock = WRITEVECTOR(shadowval);
309                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
310
311                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
312                                 /* We have a datarace */
313                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
314                                 goto ShadowExit;
315                         }
316                 }
317
318 ShadowExit:
319                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
320         }
321
322 Exit:
323         if (race) {
324 #ifdef REPORT_DATA_RACES
325                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
326                 if (raceset->add(race))
327                         assert_race(race);
328                 else model_free(race);
329 #else
330                 model_free(race);
331 #endif
332         }
333 }
334
335 /** This function does race detection for an atomic write on an expanded record. */
336 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
337 {
338         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
339         struct DataRace * race = NULL;
340
341         if (record->isAtomic)
342                 goto Exit;
343
344         /* Check for datarace against last read. */
345
346         for (int i = 0;i < record->numReads;i++) {
347                 modelclock_t readClock = record->readClock[i];
348                 thread_id_t readThread = record->thread[i];
349
350                 /* Note that readClock can't actually be zero here, so it could be
351                          optimized. */
352
353                 if (clock_may_race(currClock, thread, readClock, readThread)) {
354                         /* We have a datarace */
355                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
356                         goto Exit;
357                 }
358         }
359
360         /* Check for datarace against last write. */
361
362         {
363                 modelclock_t writeClock = record->writeClock;
364                 thread_id_t writeThread = record->writeThread;
365
366                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
367                         /* We have a datarace */
368                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
369                         goto Exit;
370                 }
371         }
372 Exit:
373         record->numReads = 0;
374         record->writeThread = thread;
375         record->isAtomic = 1;
376         modelclock_t ourClock = currClock->getClock(thread);
377         record->writeClock = ourClock;
378         return race;
379 }
380
381 /** This function does race detection on an atomic write. */
382 void atomraceCheckWrite(thread_id_t thread, void *location)
383 {
384         uint64_t *shadow = lookupAddressEntry(location);
385         uint64_t shadowval = *shadow;
386         ClockVector *currClock = get_execution()->get_cv(thread);
387         if (currClock == NULL)
388                 return;
389
390         struct DataRace * race = NULL;
391         /* Do full record */
392         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
393                 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
394                 goto Exit;
395         }
396
397         {
398                 int threadid = id_to_int(thread);
399                 modelclock_t ourClock = currClock->getClock(thread);
400
401                 /* Thread ID is too large or clock is too large. */
402                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
403                         expandRecord(shadow);
404                         race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
405                         goto Exit;
406                 }
407
408                 /* Can't race with atomic */
409                 if (shadowval & ATOMICMASK)
410                         goto ShadowExit;
411
412                 {
413                         /* Check for datarace against last read. */
414                         modelclock_t readClock = READVECTOR(shadowval);
415                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
416
417                         if (clock_may_race(currClock, thread, readClock, readThread)) {
418                                 /* We have a datarace */
419                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
420                                 goto ShadowExit;
421                         }
422                 }
423
424                 {
425                         /* Check for datarace against last write. */
426                         modelclock_t writeClock = WRITEVECTOR(shadowval);
427                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
428
429                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
430                                 /* We have a datarace */
431                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
432                                 goto ShadowExit;
433                         }
434                 }
435
436 ShadowExit:
437                 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
438         }
439
440 Exit:
441         if (race) {
442 #ifdef REPORT_DATA_RACES
443                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
444                 if (raceset->add(race))
445                         assert_race(race);
446                 else model_free(race);
447 #else
448                 model_free(race);
449 #endif
450         }
451 }
452
453 /** This function records an atomic write on an expanded record. */
454 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
455         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
456         record->numReads = 0;
457         record->writeThread = thread;
458         modelclock_t ourClock = currClock->getClock(thread);
459         record->writeClock = ourClock;
460         record->isAtomic = 1;
461 }
462
463 /** This function records a non-atomic write on an expanded record. */
464 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
465         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
466         record->numReads = 0;
467         record->writeThread = thread;
468         modelclock_t ourClock = currClock->getClock(thread);
469         record->writeClock = ourClock;
470         record->isAtomic = 0;
471 }
472
473 /** This function just updates shadow metadata for an atomic write (no race detection). */
474 void recordWrite(thread_id_t thread, void *location) {
475         uint64_t *shadow = lookupAddressEntry(location);
476         uint64_t shadowval = *shadow;
477         ClockVector *currClock = get_execution()->get_cv(thread);
478         /* Do full record */
479         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
480                 fullRecordWrite(thread, location, shadow, currClock);
481                 return;
482         }
483
484         int threadid = id_to_int(thread);
485         modelclock_t ourClock = currClock->getClock(thread);
486
487         /* Thread ID is too large or clock is too large. */
488         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
489                 expandRecord(shadow);
490                 fullRecordWrite(thread, location, shadow, currClock);
491                 return;
492         }
493
494         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
495 }
496
497 /** This function updates shadow metadata for a calloc'd region, recording a non-atomic write by the current thread for each byte. */
498 void recordCalloc(void *location, size_t size) {
499         thread_id_t thread = thread_current()->get_id();
500         for(;size != 0;size--) {
501                 uint64_t *shadow = lookupAddressEntry(location);
502                 uint64_t shadowval = *shadow;
503                 ClockVector *currClock = get_execution()->get_cv(thread);
504                 /* Do full record */
505                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
506                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
507                         return;
508                 }
509
510                 int threadid = id_to_int(thread);
511                 modelclock_t ourClock = currClock->getClock(thread);
512
513                 /* Thread ID is too large or clock is too large. */
514                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
515                         expandRecord(shadow);
516                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
517                         return;
518                 }
519
520                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
521                 location = (void *)(((char *) location) + 1);
522         }
523 }
524
525 /** This function does race detection on a read for an expanded record. */
526 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
527 {
528         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
529         struct DataRace * race = NULL;
530         /* Check for datarace against last write. */
531
532         modelclock_t writeClock = record->writeClock;
533         thread_id_t writeThread = record->writeThread;
534
535         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
536                 /* We have a datarace */
537                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
538         }
539
540         /* Shorten vector when possible */
541
542         int copytoindex = 0;
543
544         for (int i = 0;i < record->numReads;i++) {
545                 modelclock_t readClock = record->readClock[i];
546                 thread_id_t readThread = record->thread[i];
547
548                 /*  Note that this is not really a datarace check as reads cannot
549                                 actually race.  It is just determining that this read subsumes
550                                 another in the sense that either this read races or neither
551                                 read races. Note that readClock can't actually be zero, so it
552                                 could be optimized.  */
553
554                 if (clock_may_race(currClock, thread, readClock, readThread)) {
555                         /* Still need this read in vector */
556                         if (copytoindex != i) {
557                                 ASSERT(record->thread[i] >= 0);
558                                 record->readClock[copytoindex] = record->readClock[i];
559                                 record->thread[copytoindex] = record->thread[i];
560                         }
561                         copytoindex++;
562                 }
563         }
564
565         if (__builtin_popcount(copytoindex) <= 1) {
566                 if (copytoindex == 0) {
567                         int newCapacity = INITCAPACITY;
568                         record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
569                         record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
570                 } else if (copytoindex>=INITCAPACITY) {
571                         int newCapacity = copytoindex * 2;
572                         thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
573                         modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
574                         std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
575                         std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
576                         snapshot_free(record->readClock);
577                         snapshot_free(record->thread);
578                         record->readClock = newreadClock;
579                         record->thread = newthread;
580                 }
581         }
582
583         modelclock_t ourClock = currClock->getClock(thread);
584
585         ASSERT(thread >= 0);
586         record->thread[copytoindex] = thread;
587         record->readClock[copytoindex] = ourClock;
588         record->numReads = copytoindex + 1;
589         return race;
590 }
591
592 /** This function does race detection on a read. */
593 void raceCheckRead(thread_id_t thread, const void *location)
594 {
595         uint64_t *shadow = lookupAddressEntry(location);
596         uint64_t shadowval = *shadow;
597         ClockVector *currClock = get_execution()->get_cv(thread);
598         if (currClock == NULL)
599                 return;
600
601         struct DataRace * race = NULL;
602
603         /* Do full record */
604         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
605                 race = fullRaceCheckRead(thread, location, shadow, currClock);
606                 goto Exit;
607         }
608
609         {
610                 int threadid = id_to_int(thread);
611                 modelclock_t ourClock = currClock->getClock(thread);
612
613                 /* Thread ID is too large or clock is too large. */
614                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
615                         expandRecord(shadow);
616                         race = fullRaceCheckRead(thread, location, shadow, currClock);
617                         goto Exit;
618                 }
619
620                 /* Check for datarace against last write. */
621
622                 modelclock_t writeClock = WRITEVECTOR(shadowval);
623                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
624
625                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
626                         /* We have a datarace */
627                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
628                         goto ShadowExit;
629                 }
630
631 ShadowExit:
632                 {
633                         modelclock_t readClock = READVECTOR(shadowval);
634                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
635
636                         if (clock_may_race(currClock, thread, readClock, readThread)) {
637                                 /* We don't subsume this read... Have to expand record. */
638                                 expandRecord(shadow);
639                                 fullRaceCheckRead(thread, location, shadow, currClock);
640                                 goto Exit;
641                         }
642                 }
643
644                 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
645         }
646 Exit:
647         if (race) {
648 #ifdef REPORT_DATA_RACES
649                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
650                 if (raceset->add(race))
651                         assert_race(race);
652                 else model_free(race);
653 #else
654                 model_free(race);
655 #endif
656         }
657 }
658
659
660 /** This function does race detection on an atomic read for an expanded record. */
661 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
662 {
663         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
664         struct DataRace * race = NULL;
665         /* Check for datarace against last write. */
666         if (record->isAtomic)
667                 return NULL;
668
669         modelclock_t writeClock = record->writeClock;
670         thread_id_t writeThread = record->writeThread;
671
672         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
673                 /* We have a datarace */
674                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
675         }
676         return race;
677 }
678
679 /** This function does race detection on an atomic read. */
680 void atomraceCheckRead(thread_id_t thread, const void *location)
681 {
682         uint64_t *shadow = lookupAddressEntry(location);
683         uint64_t shadowval = *shadow;
684         ClockVector *currClock = get_execution()->get_cv(thread);
685         if (currClock == NULL)
686                 return;
687
688         struct DataRace * race = NULL;
689
690         /* Do full record */
691         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
692                 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
693                 goto Exit;
694         }
695
696         if (shadowval & ATOMICMASK)
697                 return;
698
699         {
700                 /* Check for datarace against last write. */
701                 modelclock_t writeClock = WRITEVECTOR(shadowval);
702                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
703
704                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
705                         /* We have a datarace */
706                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
707                         goto Exit;
708                 }
709
710
711         }
712 Exit:
713         if (race) {
714 #ifdef REPORT_DATA_RACES
715                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
716                 if (raceset->add(race))
717                         assert_race(race);
718                 else model_free(race);
719 #else
720                 model_free(race);
721 #endif
722         }
723 }
724
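/* First iteration of a multi-byte load: race-check the first byte and return its shadow
   entry, passing the old/new shadow values back through old_val/new_val so callers can
   fast-path the remaining bytes when their shadow entries match. */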
725 static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
726 {
727         uint64_t *shadow = lookupAddressEntry(location);
728         uint64_t shadowval = *shadow;
729
730         ClockVector *currClock = get_execution()->get_cv(thread);
731         if (currClock == NULL)
732                 return shadow;
733
734         struct DataRace * race = NULL;
735
736         /* Do full record */
737         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
738                 race = fullRaceCheckRead(thread, location, shadow, currClock);
739                 goto Exit;
740         }
741
742         {
743                 int threadid = id_to_int(thread);
744                 modelclock_t ourClock = currClock->getClock(thread);
745
746                 /* Thread ID is too large or clock is too large. */
747                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
748                         expandRecord(shadow);
749                         race = fullRaceCheckRead(thread, location, shadow, currClock);
750                         goto Exit;
751                 }
752
753                 /* Check for datarace against last write. */
754                 modelclock_t writeClock = WRITEVECTOR(shadowval);
755                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
756
757                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
758                         /* We have a datarace */
759                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
760                 }
761
762                 modelclock_t readClock = READVECTOR(shadowval);
763                 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
764
765                 if (clock_may_race(currClock, thread, readClock, readThread)) {
766                         /* We don't subsume this read... Have to expand record. */
767                         expandRecord(shadow);
768                         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
769                         record->thread[1] = thread;
770                         record->readClock[1] = ourClock;
771                         record->numReads++;
772
773                         goto Exit;
774                 }
775
776                 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
777
778                 *old_val = shadowval;
779                 *new_val = *shadow;
780         }
781 Exit:
782         if (race) {
783 #ifdef REPORT_DATA_RACES
784                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
785                 if (raceset->add(race))
786                         assert_race(race);
787                 else model_free(race);
788 #else
789                 model_free(race);
790 #endif
791         }
792
793         return shadow;
794 }
795
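/* Slow path for one of the remaining bytes of a multi-byte load, used when the fast
   shadow-value comparison in raceCheckRead64/32/16 fails. */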
796 static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
797         uint64_t *shadow = lookupAddressEntry(location);
798
799         uint64_t shadowval = *shadow;
800
801         ClockVector *currClock = get_execution()->get_cv(thread);
802         if (currClock == NULL)
803                 return;
804
805         struct DataRace * race = NULL;
806
807         /* Do full record */
808         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
809                 race = fullRaceCheckRead(thread, location, shadow, currClock);
810                 goto Exit;
811         }
812
813         {
814                 int threadid = id_to_int(thread);
815                 modelclock_t ourClock = currClock->getClock(thread);
816
817                 /* Thread ID is too large or clock is too large. */
818                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
819                         expandRecord(shadow);
820                         race = fullRaceCheckRead(thread, location, shadow, currClock);
821                         goto Exit;
822                 }
823
824                 /* Check for datarace against last write. */
825                 modelclock_t writeClock = WRITEVECTOR(shadowval);
826                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
827
828                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
829                         /* We have a datarace */
830                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
831                 }
832
833                 modelclock_t readClock = READVECTOR(shadowval);
834                 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
835
836                 if (clock_may_race(currClock, thread, readClock, readThread)) {
837                         /* We don't subsume this read... Have to expand record. */
838                         expandRecord(shadow);
839                         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
840                         record->thread[1] = thread;
841                         record->readClock[1] = ourClock;
842                         record->numReads++;
843
844                         goto Exit;
845                 }
846
847                 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
848         }
849 Exit:
850         if (race) {
851 #ifdef REPORT_DATA_RACES
852                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
853                 if (raceset->add(race))
854                         assert_race(race);
855                 else model_free(race);
856 #else
857                 model_free(race);
858 #endif
859         }
860 }
861
862 void raceCheckRead64(thread_id_t thread, const void *location)
863 {
864         uint64_t old_shadowval, new_shadowval;
865         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
866 #ifdef COLLECT_STAT
867         load64_count++;
868 #endif
869         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
870         if (CHECKBOUNDARY(location, 7)) {
871                 if (shadow[1]==old_shadowval)
872                         shadow[1] = new_shadowval;
873                 else goto L1;
874                 if (shadow[2]==old_shadowval)
875                         shadow[2] = new_shadowval;
876                 else goto L2;
877                 if (shadow[3]==old_shadowval)
878                         shadow[3] = new_shadowval;
879                 else goto L3;
880                 if (shadow[4]==old_shadowval)
881                         shadow[4] = new_shadowval;
882                 else goto L4;
883                 if (shadow[5]==old_shadowval)
884                         shadow[5] = new_shadowval;
885                 else goto L5;
886                 if (shadow[6]==old_shadowval)
887                         shadow[6] = new_shadowval;
888                 else goto L6;
889                 if (shadow[7]==old_shadowval)
890                         shadow[7] = new_shadowval;
891                 else goto L7;
892                 return;
893         }
894
895 L1:
896         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
897 L2:
898         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
899 L3:
900         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
901 L4:
902         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
903 L5:
904         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
905 L6:
906         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
907 L7:
908         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
909 }
910
911 void raceCheckRead32(thread_id_t thread, const void *location)
912 {
913         uint64_t old_shadowval, new_shadowval;
914         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
915 #ifdef COLLECT_STAT
916         load32_count++;
917 #endif
918         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
919         if (CHECKBOUNDARY(location, 3)) {
920                 if (shadow[1]==old_shadowval)
921                         shadow[1] = new_shadowval;
922                 else goto L1;
923                 if (shadow[2]==old_shadowval)
924                         shadow[2] = new_shadowval;
925                 else goto L2;
926                 if (shadow[3]==old_shadowval)
927                         shadow[3] = new_shadowval;
928                 else goto L3;
929                 return;
930         }
931
932 L1:
933         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
934 L2:
935         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
936 L3:
937         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
938 }
939
940 void raceCheckRead16(thread_id_t thread, const void *location)
941 {
942         uint64_t old_shadowval, new_shadowval;
943         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
944 #ifdef COLLECT_STAT
945         load16_count++;
946 #endif
947         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
948         if (CHECKBOUNDARY(location, 1)) {
949                 if (shadow[1]==old_shadowval) {
950                         shadow[1] = new_shadowval;
951                         return;
952                 }
953         }
954         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
955 }
956
957 void raceCheckRead8(thread_id_t thread, const void *location)
958 {
959         uint64_t old_shadowval, new_shadowval;
960         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
961 #ifdef COLLECT_STAT
962         load8_count++;
963 #endif
964         raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
965 }
966
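/* First iteration of a multi-byte store: race-check the first byte and return its shadow
   entry, passing the old/new shadow values back so callers can fast-path the remaining
   bytes when their shadow entries match. */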
967 static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
968 {
969         uint64_t *shadow = lookupAddressEntry(location);
970         uint64_t shadowval = *shadow;
971         ClockVector *currClock = get_execution()->get_cv(thread);
972         if (currClock == NULL)
973                 return shadow;
974
975         struct DataRace * race = NULL;
976         /* Do full record */
977         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
978                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
979                 goto Exit;
980         }
981
982         {
983                 int threadid = id_to_int(thread);
984                 modelclock_t ourClock = currClock->getClock(thread);
985
986                 /* Thread ID is too large or clock is too large. */
987                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
988                         expandRecord(shadow);
989                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
990                         goto Exit;
991                 }
992
993                 {
994                         /* Check for datarace against last read. */
995                         modelclock_t readClock = READVECTOR(shadowval);
996                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
997
998                         if (clock_may_race(currClock, thread, readClock, readThread)) {
999                                 /* We have a datarace */
1000                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1001                                 goto ShadowExit;
1002                         }
1003                 }
1004
1005                 {
1006                         /* Check for datarace against last write. */
1007                         modelclock_t writeClock = WRITEVECTOR(shadowval);
1008                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1009
1010                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1011                                 /* We have a datarace */
1012                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
1013                                 goto ShadowExit;
1014                         }
1015                 }
1016
1017 ShadowExit:
1018                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
1019
1020                 *old_val = shadowval;
1021                 *new_val = *shadow;
1022         }
1023
1024 Exit:
1025         if (race) {
1026 #ifdef REPORT_DATA_RACES
1027                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1028                 if (raceset->add(race))
1029                         assert_race(race);
1030                 else model_free(race);
1031 #else
1032                 model_free(race);
1033 #endif
1034         }
1035
1036         return shadow;
1037 }
1038
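/* Slow path for one of the remaining bytes of a multi-byte store, used when the fast
   shadow-value comparison in raceCheckWrite64/32/16 fails. */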
1039 static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
1040         uint64_t *shadow = lookupAddressEntry(location);
1041
1042         uint64_t shadowval = *shadow;
1043
1044         ClockVector *currClock = get_execution()->get_cv(thread);
1045         if (currClock == NULL)
1046                 return;
1047
1048         struct DataRace * race = NULL;
1049         /* Do full record */
1050         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1051                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1052                 goto Exit;
1053         }
1054
1055         {
1056                 int threadid = id_to_int(thread);
1057                 modelclock_t ourClock = currClock->getClock(thread);
1058
1059                 /* Thread ID is too large or clock is too large. */
1060                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1061                         expandRecord(shadow);
1062                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
1063                         goto Exit;
1064                 }
1065
1066                 {
1067                         /* Check for datarace against last read. */
1068                         modelclock_t readClock = READVECTOR(shadowval);
1069                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1070
1071                         if (clock_may_race(currClock, thread, readClock, readThread)) {
1072                                 /* We have a datarace */
1073                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1074                                 goto ShadowExit;
1075                         }
1076                 }
1077
1078                 {
1079                         /* Check for datarace against last write. */
1080                         modelclock_t writeClock = WRITEVECTOR(shadowval);
1081                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1082
1083                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1084                                 /* We have a datarace */
1085                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
1086                                 goto ShadowExit;
1087                         }
1088                 }
1089
1090 ShadowExit:
1091                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
1092         }
1093
1094 Exit:
1095         if (race) {
1096 #ifdef REPORT_DATA_RACES
1097                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1098                 if (raceset->add(race))
1099                         assert_race(race);
1100                 else model_free(race);
1101 #else
1102                 model_free(race);
1103 #endif
1104         }
1105 }
1106
1107 void raceCheckWrite64(thread_id_t thread, const void *location)
1108 {
1109         uint64_t old_shadowval, new_shadowval;
1110         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1111 #ifdef COLLECT_STAT
1112         store64_count++;
1113 #endif
1114         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1115         if (CHECKBOUNDARY(location, 7)) {
1116                 if (shadow[1]==old_shadowval)
1117                         shadow[1] = new_shadowval;
1118                 else goto L1;
1119                 if (shadow[2]==old_shadowval)
1120                         shadow[2] = new_shadowval;
1121                 else goto L2;
1122                 if (shadow[3]==old_shadowval)
1123                         shadow[3] = new_shadowval;
1124                 else goto L3;
1125                 if (shadow[4]==old_shadowval)
1126                         shadow[4] = new_shadowval;
1127                 else goto L4;
1128                 if (shadow[5]==old_shadowval)
1129                         shadow[5] = new_shadowval;
1130                 else goto L5;
1131                 if (shadow[6]==old_shadowval)
1132                         shadow[6] = new_shadowval;
1133                 else goto L6;
1134                 if (shadow[7]==old_shadowval)
1135                         shadow[7] = new_shadowval;
1136                 else goto L7;
1137                 return;
1138         }
1139
1140 L1:
1141         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1142 L2:
1143         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1144 L3:
1145         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1146 L4:
1147         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1148 L5:
1149         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1150 L6:
1151         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1152 L7:
1153         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1154 }
1155
1156 void raceCheckWrite32(thread_id_t thread, const void *location)
1157 {
1158         uint64_t old_shadowval, new_shadowval;
1159         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1160 #ifdef COLLECT_STAT
1161         store32_count++;
1162 #endif
1163         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1164         if (CHECKBOUNDARY(location, 3)) {
1165                 if (shadow[1]==old_shadowval)
1166                         shadow[1] = new_shadowval;
1167                 else goto L1;
1168                 if (shadow[2]==old_shadowval)
1169                         shadow[2] = new_shadowval;
1170                 else goto L2;
1171                 if (shadow[3]==old_shadowval)
1172                         shadow[3] = new_shadowval;
1173                 else goto L3;
1174                 return;
1175         }
1176
1177 L1:
1178         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1179 L2:
1180         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1181 L3:
1182         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1183 }
1184
1185 void raceCheckWrite16(thread_id_t thread, const void *location)
1186 {
1187         uint64_t old_shadowval, new_shadowval;
1188         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1189 #ifdef COLLECT_STAT
1190         store16_count++;
1191 #endif
1192
1193         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1194         if (CHECKBOUNDARY(location, 1)) {
1195                 if (shadow[1]==old_shadowval) {
1196                         shadow[1] = new_shadowval;
1197                         return;
1198                 }
1199         }
1200         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1201 }
1202
1203 void raceCheckWrite8(thread_id_t thread, const void *location)
1204 {
1205         uint64_t old_shadowval, new_shadowval;
1206         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1207 #ifdef COLLECT_STAT
1208         store8_count++;
1209 #endif
1210         raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1211 }
1212
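/* Prints the per-width load/store counters gathered above (all zero unless built with COLLECT_STAT). */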
1213 void print_normal_accesses()
1214 {
1215         model_print("store 8  count: %u\n", store8_count);
1216         model_print("store 16 count: %u\n", store16_count);
1217         model_print("store 32 count: %u\n", store32_count);
1218         model_print("store 64 count: %u\n", store64_count);
1219
1220         model_print("load  8  count: %u\n", load8_count);
1221         model_print("load  16 count: %u\n", load16_count);
1222         model_print("load  32 count: %u\n", load32_count);
1223         model_print("load  64 count: %u\n", load64_count);
1224 }