#include "datarace.h"
#include "model.h"
#include "threads-model.h"
#include <stdio.h>
#include <cstring>
#include "mymemory.h"
#include "clockvector.h"
#include "config.h"
#include "action.h"
#include "execution.h"
#include "stl-model.h"
#include <execinfo.h>

static struct ShadowTable *root;
static void *memory_base;
static void *memory_top;
static RaceSet * raceset;

static const ModelExecution * get_execution()
{
	return model->get_execution();
}

/** This function initializes the data race detector. */
void initRaceDetector()
{
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	raceset = new RaceSet();
}

void * table_calloc(size_t size)
{
	if ((((char *)memory_base) + size) > memory_top) {
		return snapshot_calloc(size, 1);
	} else {
		void *tmp = memory_base;
		memory_base = ((char *)memory_base) + size;
		return tmp;
	}
}
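
/*
 * Note: table_calloc() carves shadow-table nodes out of the block reserved in
 * initRaceDetector() with a simple bump pointer, falling back to
 * snapshot_calloc() once that block is exhausted.  A minimal standalone sketch
 * of the same bump-allocation idea (POOL_SIZE, pool, and bump_alloc are
 * illustrative names, not part of this file's API):
 *
 *	static char pool[POOL_SIZE];      // hypothetical fixed pool, zero-initialized
 *	static size_t used = 0;
 *	void *bump_alloc(size_t size) {
 *		if (used + size > POOL_SIZE)
 *			return calloc(size, 1);   // fall back to the general allocator
 *		void *p = pool + used;        // hand out the next chunk
 *		used += size;
 *		return p;                     // already zeroed, like the snapshot block
 *	}
 */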

/** This function looks up the entry in the shadow table corresponding to a
 * given address. */
static uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
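
/*
 * Layout note: the shadow table is a small trie indexed by 16-bit slices of
 * the address.  With BIT48 set, bits 47..32 select a second-level ShadowTable,
 * bits 31..16 select a ShadowBaseTable, and bits 15..0 index the per-byte
 * shadow word inside it.  Illustrative walk-through (MASK16BIT is presumably
 * 0xffff; see datarace.h for the actual definitions):
 *
 *	addr = 0x00007f123456789a
 *	root->array[0x7f12]          -> ShadowTable      (allocated on demand)
 *	      ->array[0x3456]        -> ShadowBaseTable  (allocated on demand)
 *	            ->array[0x789a]  -> uint64_t shadow word for this byte
 */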

bool hasNonAtomicStore(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		// Do we have a non-atomic write with a non-zero clock?
		return !(ATOMICMASK & shadowval);
	} else {
		if (shadowval == 0)
			return true;
		struct RaceRecord *record = (struct RaceRecord *)shadowval;
		return !record->isAtomic;
	}
}

void setAtomicStoreFlag(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		*shadow = shadowval | ATOMICMASK;
	} else {
		if (shadowval == 0) {
			*shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
			return;
		}
		struct RaceRecord *record = (struct RaceRecord *)shadowval;
		record->isAtomic = 1;
	}
}

void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval) || shadowval == 0) {
		// Short-form (or empty) record: extract the last writer directly from the bits
		*thread = WRTHREADID(shadowval);
		*clock = WRITEVECTOR(shadowval);
	} else {
		struct RaceRecord *record = (struct RaceRecord *)shadowval;
		*thread = record->writeThread;
		*clock = record->writeClock;
	}
}

/**
 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
 * to check the potential for a data race.
 * @param clock1 The current clock vector
 * @param tid1 The current thread; paired with clock1
 * @param clock2 The clock value for the potentially-racing action
 * @param tid2 The thread ID for the potentially-racing action
 * @return true if the current clock allows a race with the event at clock2/tid2
 */
static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
			   modelclock_t clock2, thread_id_t tid2)
{
	return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
}
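
/*
 * Example of how this predicate behaves (values illustrative): suppose thread
 * t2 last wrote a byte at its clock 7.  If the current thread t1 has only
 * synchronized with t2 up to clock 5 (clock1->getClock(t2) == 5 <= 7), that
 * write does not happen-before the current access, so the two may race.  If
 * t1 had synchronized past the write (getClock(t2) == 9 > 7), or clock2 were
 * 0 (no previous access recorded), no race is possible.
 */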

/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps. */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	if (readClock != 0) {
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	}
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	*shadow = (uint64_t) record;
}
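
/*
 * Shadow-word forms used throughout this file: a "short" record packs the
 * last reader (thread id + clock) and last writer (thread id + clock) plus an
 * atomic bit directly into the 64-bit word via ENCODEOP/ATOMICMASK; the exact
 * field widths live in datarace.h.  Once more than one concurrent reader must
 * be remembered, or an id/clock no longer fits (MAXTHREADID/MAXWRITEVECTOR),
 * the word instead holds a pointer to a heap-allocated RaceRecord with a
 * growable reader vector, which is what expandRecord() produces.
 */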

#define FIRST_STACK_FRAME 2

unsigned int race_hash(struct DataRace *race) {
	unsigned int hash = 0;
	for (int i = FIRST_STACK_FRAME; i < race->numframes; i++) {
		hash ^= ((uintptr_t)race->backtrace[i]);
		hash = (hash >> 3) | (hash << 29);
	}
	return hash;
}

bool race_equals(struct DataRace *r1, struct DataRace *r2) {
	if (r1->numframes != r2->numframes)
		return false;
	for (int i = FIRST_STACK_FRAME; i < r1->numframes; i++) {
		if (r1->backtrace[i] != r2->backtrace[i])
			return false;
	}
	return true;
}
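
/*
 * race_hash()/race_equals() identify a race by its stack trace, skipping the
 * FIRST_STACK_FRAME innermost frames, so the RaceSet used below (presumably
 * keyed by these two functions) reports each distinct racing backtrace only
 * once.  This is the lookup pattern the race-check functions below follow:
 *
 *	struct DataRace *race = ...;             // filled in by reportDataRace()
 *	race->numframes = backtrace(race->backtrace,
 *	                            sizeof(race->backtrace) / sizeof(void *));
 *	if (raceset->add(race))                  // new backtrace -> report it
 *		assert_race(race);
 *	else
 *		model_free(race);                    // duplicate -> discard
 */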

/** This function is called when we detect a data race. */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
	return race;
}

/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	model_print("\nData race detected @ address %p:\n"
		    "    Access 1: %5s in thread %2d @ clock %3u\n"
		    "    Access 2: %5s in thread %2d @ clock %3u\n\n",
		    race->address,
		    race->isoldwrite ? "write" : "read",
		    id_to_int(race->oldthread),
		    race->oldclock,
		    race->isnewwrite ? "write" : "read",
		    id_to_int(race->newaction->get_tid()),
		    race->newaction->get_seq_number()
		    );
}

/** This function does race detection for a write on an expanded record. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0; i < record->numReads; i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
		   optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}

/** This function does race detection on a write. */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
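
/*
 * Call-site sketch (illustrative only; the real instrumentation hooks live
 * outside this file): a compiler pass or wrapper is expected to invoke these
 * checks around every plain memory access it instruments, e.g.
 *
 *	// hypothetical instrumented store of a 1-byte plain (non-atomic) value
 *	raceCheckWrite(thread_current()->get_id(), &flag);  // shadow check + report
 *	flag = 1;                                            // the program's own store
 *
 * 'flag' is a stand-in variable; the checker only needs the acting thread's
 * thread_id_t and the accessed address.
 */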

/** This function does race detection for an atomic write on an expanded record. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0; i < record->numReads; i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
		   optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}

/** This function does race detection on an atomic write. */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}

/** This function just records an atomic write on an expanded record (no race detection). */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 1;
}

/** This function just records a non-atomic write on an expanded record (no race detection). */
void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 0;
}

/** This function just updates metadata on an atomic write. */
void recordWrite(thread_id_t thread, void *location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		fullRecordWrite(thread, location, shadow, currClock);
		return;
	}

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		fullRecordWrite(thread, location, shadow, currClock);
		return;
	}

	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
}

/** This function updates the shadow metadata for a freshly calloc'd (zero-initialized) region. */
void recordCalloc(void *location, size_t size) {
	thread_id_t thread = thread_current()->get_id();
	for (; size != 0; size--) {
		uint64_t *shadow = lookupAddressEntry(location);
		uint64_t shadowval = *shadow;
		ClockVector *currClock = get_execution()->get_cv(thread);
		/* Do full record */
		if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
			return;
		}

		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
			return;
		}

		*shadow = ENCODEOP(0, 0, threadid, ourClock);
		location = (void *)(((char *) location) + 1);
	}
}
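
/*
 * recordCalloc() walks the allocation one byte at a time because shadow state
 * is tracked per byte; each byte is stamped as a non-atomic write at the
 * allocating thread's current clock, so later unsynchronized accesses from
 * other threads are checked against the zero-initialization itself rather
 * than against stale shadow state.
 */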

/** This function does race detection on a read for an expanded record. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0; i < record->numReads; i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that this is not really a datarace check as reads cannot
		   actually race.  It is just determining that this read subsumes
		   another in the sense that either this read races or neither
		   read races. Note that readClock can't actually be zero, so it
		   could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
			}
			copytoindex++;
		}
	}

	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0) {
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex >= INITCAPACITY) {
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	modelclock_t ourClock = currClock->getClock(thread);

	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
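
/*
 * Capacity note: the reader vector is compacted in place (reads that are now
 * ordered before the current read are dropped), and the popcount test appears
 * to grow the arrays only when copytoindex is exactly a power of two at or
 * above INITCAPACITY, i.e. just as the next append would exceed the current
 * doubling-based capacity.
 */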

/** This function does race detection on a read. */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}

/** This function does race detection on an atomic read for an expanded record. */
struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */
	if (record->isAtomic)
		return NULL;

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}
	return race;
}

/** This function does race detection on an atomic read. */
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}
	}
Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}

/** Checks the first byte of a multi-byte read and returns its shadow entry.
 *  When the short-record fast path applies, it also reports the shadow word it
 *  found (*old_val) and the word it stored (*new_val) so that the remaining
 *  bytes of the access can be fast-pathed. */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

		*old_val = shadowval;
		*new_val = *shadow;
	}
Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}

	return shadow;
}

/** Checks one of the remaining bytes of a multi-byte read.  If the byte's
 *  shadow word matches what the first byte saw, it is simply replaced with the
 *  first byte's updated word; otherwise the full per-byte check runs. */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location, uint64_t first_shadowval, uint64_t updated_shadowval, uint64_t * _shadow, bool fast_path)
{
	uint64_t *shadow;
	if (fast_path)
		shadow = _shadow;
	else
		shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	if (shadowval == first_shadowval) {
		*shadow = updated_shadowval;
		return;
	}

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}

void raceCheckRead64(thread_id_t thread, const void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	bool fast_path = CHECKBOUNDARY(location, 7);

	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4), old_shadowval, new_shadowval, shadow + 4, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5), old_shadowval, new_shadowval, shadow + 5, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6), old_shadowval, new_shadowval, shadow + 6, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7), old_shadowval, new_shadowval, shadow + 7, fast_path);
}
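
/*
 * The 64/32/16-bit wrappers check every byte of the access.  The first byte
 * goes through raceCheckRead_firstIt(), which also reports the shadow word it
 * saw and the word it wrote back; the remaining bytes may then reuse adjacent
 * shadow entries (shadow + i) directly when CHECKBOUNDARY() says the access
 * does not cross a shadow-table boundary (presumably one ShadowBaseTable,
 * given the 16-bit indexing above) and their shadow words still match.
 */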

void raceCheckRead32(thread_id_t thread, const void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	bool fast_path = CHECKBOUNDARY(location, 3);

	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
}

void raceCheckRead16(thread_id_t thread, const void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	bool fast_path = CHECKBOUNDARY(location, 1);

	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
}

void raceCheckRead8(thread_id_t thread, const void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
}

/** Checks the first byte of a multi-byte write and returns its shadow entry.
 *  When the short-record fast path applies, it also reports the shadow word it
 *  found (*old_val) and the word it stored (*new_val) so that the remaining
 *  bytes of the access can be fast-pathed. */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);

		*old_val = shadowval;
		*new_val = *shadow;
	}

Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}

	return shadow;
}

/** Checks one of the remaining bytes of a multi-byte write.  If the byte's
 *  shadow word matches what the first byte saw, it is simply replaced with the
 *  first byte's updated word; otherwise the full per-byte check runs. */
static inline void raceCheckWrite_otherIt(thread_id_t thread, void * location, uint64_t first_shadowval, uint64_t updated_shadowval, uint64_t * _shadow, bool fast_path)
{
	uint64_t *shadow;
	if (fast_path)
		shadow = _shadow;
	else
		shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	if (shadowval == first_shadowval) {
		*shadow = updated_shadowval;
		return;
	}

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		race->numframes = backtrace(race->backtrace, sizeof(race->backtrace) / sizeof(void *));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}

void raceCheckWrite64(thread_id_t thread, void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	bool fast_path = CHECKBOUNDARY(location, 7);

	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 4), old_shadowval, new_shadowval, shadow + 4, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 5), old_shadowval, new_shadowval, shadow + 5, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 6), old_shadowval, new_shadowval, shadow + 6, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 7), old_shadowval, new_shadowval, shadow + 7, fast_path);
}

void raceCheckWrite32(thread_id_t thread, void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	bool fast_path = CHECKBOUNDARY(location, 3);

	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
}

void raceCheckWrite16(thread_id_t thread, void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	bool fast_path = CHECKBOUNDARY(location, 1);

	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
}

void raceCheckWrite8(thread_id_t thread, void *location)
{
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
}
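
/*
 * Summary of the entry points defined above (all operate on the per-byte
 * shadow table):
 *   raceCheckRead/raceCheckRead8/16/32/64,
 *   raceCheckWrite/raceCheckWrite8/16/32/64  - plain (non-atomic) accesses
 *   atomraceCheckRead, atomraceCheckWrite    - accesses made through atomics
 *   recordWrite, recordCalloc                - metadata-only updates
 *   hasNonAtomicStore, setAtomicStoreFlag,
 *   getStoreThreadAndClock                   - shadow-state queries/helpers
 */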