/* datarace.cc -- shadow-memory based data race detector for c11tester */
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
/* Convenience accessor for the model checker's current execution object. */
static const ModelExecution * get_execution()
{
	return model->get_execution();
}
23
/** This function initializes the data race detector. */
void initRaceDetector()
{
	/* Top-level shadow table; lower-level tables are carved out of the
	   preallocated arena below by table_calloc(). */
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	/* Set used to deduplicate reported races by call stack */
	raceset = new RaceSet();
}
32
33 void * table_calloc(size_t size)
34 {
35         if ((((char *)memory_base) + size) > memory_top) {
36                 return snapshot_calloc(size, 1);
37         } else {
38                 void *tmp = memory_base;
39                 memory_base = ((char *)memory_base) + size;
40                 return tmp;
41         }
42 }
43
/** This function looks up the entry in the shadow table corresponding to a
 * given address.  The shadow memory is a radix trie keyed on 16-bit slices
 * of the address; missing interior tables are allocated lazily on first
 * touch.  Returns a pointer to the 64-bit shadow word for that byte. */
static uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	/* 48-bit address spaces: consume bits 47..32 through an extra level */
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	/* Bits 31..16 select the base table; the low 16 bits index into it */
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
62
63
64 bool hasNonAtomicStore(const void *address) {
65         uint64_t * shadow = lookupAddressEntry(address);
66         uint64_t shadowval = *shadow;
67         if (ISSHORTRECORD(shadowval)) {
68                 //Do we have a non atomic write with a non-zero clock
69                 return !(ATOMICMASK & shadowval);
70         } else {
71                 if (shadowval == 0)
72                         return true;
73                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74                 return !record->isAtomic;
75         }
76 }
77
78 void setAtomicStoreFlag(const void *address) {
79         uint64_t * shadow = lookupAddressEntry(address);
80         uint64_t shadowval = *shadow;
81         if (ISSHORTRECORD(shadowval)) {
82                 *shadow = shadowval | ATOMICMASK;
83         } else {
84                 if (shadowval == 0) {
85                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
86                         return;
87                 }
88                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
89                 record->isAtomic = 1;
90         }
91 }
92
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94         uint64_t * shadow = lookupAddressEntry(address);
95         uint64_t shadowval = *shadow;
96         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97                 //Do we have a non atomic write with a non-zero clock
98                 *thread = WRTHREADID(shadowval);
99                 *clock = WRITEVECTOR(shadowval);
100         } else {
101                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102                 *thread = record->writeThread;
103                 *clock = record->writeClock;
104         }
105 }
106
107 /**
108  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109  * to check the potential for a data race.
110  * @param clock1 The current clock vector
111  * @param tid1 The current thread; paired with clock1
112  * @param clock2 The clock value for the potentially-racing action
113  * @param tid2 The thread ID for the potentially-racing action
114  * @return true if the current clock allows a race with the event at clock2/tid2
115  */
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117                                                                                                          modelclock_t clock2, thread_id_t tid2)
118 {
119         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
120 }
121
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps. */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	/* Decode the compact bit-packed encoding */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	/* readClock == 0 means no read was recorded; in that case the read
	   arrays stay unallocated (NULL from calloc) and numReads stays 0 */
	if (readClock != 0) {
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	}
	/* Preserve the atomic flag from the compact encoding */
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	/* Full records are identified by storing the pointer in the shadow word */
	*shadow = (uint64_t) record;
}
151
152 #define FIRST_STACK_FRAME 2
153
154 unsigned int race_hash(struct DataRace *race) {
155         unsigned int hash = 0;
156         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157                 hash ^= ((uintptr_t)race->backtrace[i]);
158                 hash = (hash >> 3) | (hash << 29);
159         }
160         return hash;
161 }
162
163
164 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
165         if (r1->numframes != r2->numframes)
166                 return false;
167         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
168                 if (r1->backtrace[i] != r2->backtrace[i])
169                         return false;
170         }
171         return true;
172 }
173
/** This function is called when we detect a data race.  Allocates and fills
 *  a DataRace descriptor for the two conflicting accesses.  Note that the
 *  backtrace/numframes fields are NOT set here -- the caller captures the
 *  stack afterwards.  Caller owns the returned allocation (model_malloc). */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
	return race;
}
186
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	/* Symbolize the captured stack straight to the model output fd */
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	/* NOTE(review): Access 2's "clock" column is filled with the new
	   action's sequence number -- confirm seq number equals the clock
	   value in this model. */
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
211
/** This function does race detection for a write on an expanded record.
 *  Checks the new write against every recorded reader and the last writer
 *  (at most one race is reported per call), then resets the record so this
 *  write becomes its last (non-atomic) write with no outstanding reads.
 *  Returns a race descriptor (caller owns it) or NULL. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Install this write as the record's last write (runs whether or not
	   a race was found); clears all recorded reads */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
254
/** This function does race detection on a (plain, non-atomic) write.
 *  Dispatches to the expanded-record path when needed; otherwise checks
 *  the compact record against the last read and last write, then encodes
 *  this write into the shadow word.  Any detected race is deduplicated by
 *  stack trace before being reported. */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}



		{
			/* Check for datarace against last read. */

			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */

			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		/* Reached by fall-through as well as by goto: record this write in
		   the compact encoding (clears the read fields and the atomic bit) */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		/* Capture the stack; report only if this race site is new */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
322
323
/** This function does race detection for an ATOMIC write on an expanded
 *  record.  If the previous write was atomic, no race is possible and the
 *  checks are skipped.  Afterwards the record is reset so this write is the
 *  last write, flagged atomic.  Returns a race descriptor or NULL. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Atomic-vs-atomic accesses never race */
	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Install this write as the record's last write, marked atomic */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
369
/** This function does race detection on an ATOMIC write.  Mirrors
 *  raceCheckWrite but skips the checks when the previous access was atomic
 *  and leaves the ATOMICMASK bit set in the new compact encoding. */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */

			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */

			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		/* Record this write in the compact encoding, flagged atomic */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
		/* Capture the stack; report only if this race site is new */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
439
/** Records an atomic write into an expanded record WITHOUT doing any race
 *  checks: clears recorded reads and installs this thread/clock as the last
 *  write, flagged atomic.  (The original comment claiming this "does race
 *  detection" was a copy-paste error.) */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 1;
}
449
/** Records a NON-atomic write into an expanded record WITHOUT doing any
 *  race checks: clears recorded reads and installs this thread/clock as the
 *  last write with the atomic flag cleared.  (The original comment claiming
 *  this "does race detection" was a copy-paste error.) */
void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 0;
}
459
/** This function just updates metadata on atomic write -- it records the
 *  write into the shadow entry without performing any race checks. */
void recordWrite(thread_id_t thread, void *location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* NOTE(review): unlike raceCheckWrite, currClock is not NULL-checked
	   here -- presumably callers guarantee the thread already has a clock
	   vector; confirm. */
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		fullRecordWrite(thread, location, shadow, currClock);
		return;
	}

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		fullRecordWrite(thread, location, shadow, currClock);
		return;
	}

	/* Compact encoding: last write by this thread/clock, atomic bit set */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
}
483
484 /** This function just updates metadata on atomic write. */
485 void recordCalloc(void *location, size_t size) {
486         thread_id_t thread = thread_current()->get_id();
487         for(;size != 0;size--) {
488                 uint64_t *shadow = lookupAddressEntry(location);
489                 uint64_t shadowval = *shadow;
490                 ClockVector *currClock = get_execution()->get_cv(thread);
491                 /* Do full record */
492                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
493                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
494                         return;
495                 }
496
497                 int threadid = id_to_int(thread);
498                 modelclock_t ourClock = currClock->getClock(thread);
499
500                 /* Thread ID is too large or clock is too large. */
501                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
502                         expandRecord(shadow);
503                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
504                         return;
505                 }
506
507                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
508                 location = (void *)(((char *) location) + 1);
509         }
510 }
511
512
513
/** This function does race detection on a read for an expanded record.
 *  Checks the read against the record's last write, compacts the read
 *  vector down to reads not subsumed by this one, grows the arrays when
 *  full, and appends this read.  Returns a race descriptor or NULL. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
			}
			copytoindex++;
		}
	}

	/* popcount(copytoindex) <= 1 means copytoindex is 0 or a power of two.
	   Capacity starts at INITCAPACITY and doubles, so the arrays are
	   exactly full when copytoindex is a power of two >= INITCAPACITY. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0) {
			/* Allocate arrays (they may be NULL for records created by
			   expandRecord with no recorded read).
			   NOTE(review): if arrays already existed they are not freed
			   here -- looks like a small leak; confirm. */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Arrays are full: double the capacity and copy over */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	modelclock_t ourClock = currClock->getClock(thread);

	/* Append this read to the (compacted) read vector */
	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
580
/** This function does race detection on a (plain, non-atomic) read.
 *  Dispatches to the expanded-record path when needed; otherwise checks the
 *  compact record's last write, expands the record if the existing recorded
 *  read is not subsumed by this one, and finally re-encodes the shadow word
 *  with this read.  Detected races are deduplicated by stack trace. */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

		/* Reached by fall-through as well as by goto */
ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		/* Replace the recorded read with this one; keep the write fields
		   and atomic bit unchanged */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		/* Capture the stack; report only if this race site is new */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
643
644
645 /** This function does race detection on a read for an expanded record. */
646 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
647 {
648         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
649         struct DataRace * race = NULL;
650         /* Check for datarace against last write. */
651         if (record->isAtomic)
652                 return NULL;
653
654         modelclock_t writeClock = record->writeClock;
655         thread_id_t writeThread = record->writeThread;
656
657         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
658                 /* We have a datarace */
659                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
660         }
661         return race;
662 }
663
/** This function does race detection on an ATOMIC read.  Only the last
 *  (non-atomic) write can race with it; the shadow entry is not modified.
 *  Detected races are deduplicated by stack trace. */
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	/* Last access was atomic: an atomic read can't race with it */
	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}


	}
Exit:
	if (race) {
		/* Capture the stack; report only if this race site is new */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}