Bug fix
[c11tester.git] / datarace.cc
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
19 static const ModelExecution * get_execution()
20 {
21         return model->get_execution();
22 }
23
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
26 {
27         root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28         memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29         memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30         raceset = new RaceSet();
31 }
32
33 void * table_calloc(size_t size)
34 {
35         if ((((char *)memory_base) + size) > memory_top) {
36                 return snapshot_calloc(size, 1);
37         } else {
38                 void *tmp = memory_base;
39                 memory_base = ((char *)memory_base) + size;
40                 return tmp;
41         }
42 }
43
/** This function looks up the entry in the shadow table corresponding to a
 * given address.
 *
 * The shadow memory is a multi-level radix table indexed by 16-bit slices
 * of the address (an extra top level is used under BIT48 for 48-bit
 * address spaces). Missing intermediate tables are allocated lazily via
 * table_calloc(), so a lookup never fails.
 *
 * @param address The application address to shadow
 * @return Pointer to the 64-bit shadow word for that address
 */
static uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	/* Top level: bits 47..32 of the address. */
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	/* Middle level: bits 31..16; leaf table holds one word per byte. */
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
62
63
64 bool hasNonAtomicStore(const void *address) {
65         uint64_t * shadow = lookupAddressEntry(address);
66         uint64_t shadowval = *shadow;
67         if (ISSHORTRECORD(shadowval)) {
68                 //Do we have a non atomic write with a non-zero clock
69                 return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
70         } else {
71                 if (shadowval == 0)
72                         return false;
73                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74                 return !record->isAtomic && record->writeClock != 0;
75         }
76 }
77
78 void setAtomicStoreFlag(const void *address) {
79         uint64_t * shadow = lookupAddressEntry(address);
80         uint64_t shadowval = *shadow;
81         if (ISSHORTRECORD(shadowval)) {
82                 *shadow = shadowval | ATOMICMASK;
83         } else {
84                 if (shadowval == 0)
85                         return;
86                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
87                 record->isAtomic = 1;
88         }
89 }
90
91 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
92         uint64_t * shadow = lookupAddressEntry(address);
93         uint64_t shadowval = *shadow;
94         if (ISSHORTRECORD(shadowval)) {
95                 //Do we have a non atomic write with a non-zero clock
96                 *thread = WRTHREADID(shadowval);
97                 *clock = WRITEVECTOR(shadowval);
98         } else {
99                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
100                 *thread = record->writeThread;
101                 *clock = record->writeClock;
102         }
103 }
104
105 /**
106  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
107  * to check the potential for a data race.
108  * @param clock1 The current clock vector
109  * @param tid1 The current thread; paired with clock1
110  * @param clock2 The clock value for the potentially-racing action
111  * @param tid2 The thread ID for the potentially-racing action
112  * @return true if the current clock allows a race with the event at clock2/tid2
113  */
114 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
115                                                                                                          modelclock_t clock2, thread_id_t tid2)
116 {
117         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
118 }
119
120 /**
121  * Expands a record from the compact form to the full form.  This is
122  * necessary for multiple readers or for very large thread ids or time
123  * stamps. */
124 static void expandRecord(uint64_t *shadow)
125 {
126         uint64_t shadowval = *shadow;
127
128         modelclock_t readClock = READVECTOR(shadowval);
129         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
130         modelclock_t writeClock = WRITEVECTOR(shadowval);
131         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
132
133         struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
134         record->writeThread = writeThread;
135         record->writeClock = writeClock;
136
137         if (readClock != 0) {
138                 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
139                 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
140                 record->numReads = 1;
141                 ASSERT(readThread >= 0);
142                 record->thread[0] = readThread;
143                 record->readClock[0] = readClock;
144         }
145         if (shadowval & ATOMICMASK)
146                 record->isAtomic = 1;
147         *shadow = (uint64_t) record;
148 }
149
150 #define FIRST_STACK_FRAME 2
151
152 unsigned int race_hash(struct DataRace *race) {
153         unsigned int hash = 0;
154         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
155                 hash ^= ((uintptr_t)race->backtrace[i]);
156                 hash = (hash >> 3) | (hash << 29);
157         }
158         return hash;
159 }
160
161
162 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
163         if (r1->numframes != r2->numframes)
164                 return false;
165         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
166                 if (r1->backtrace[i] != r2->backtrace[i])
167                         return false;
168         }
169         return true;
170 }
171
172 /** This function is called when we detect a data race.*/
173 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
174 {
175         struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
176         race->oldthread = oldthread;
177         race->oldclock = oldclock;
178         race->isoldwrite = isoldwrite;
179         race->newaction = newaction;
180         race->isnewwrite = isnewwrite;
181         race->address = address;
182         return race;
183 }
184
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * Prints the saved backtrace followed by both accesses. NOTE(review): the
 * second access's "clock" column prints the action's sequence number
 * (get_seq_number()), not a clock-vector value — presumably intentional
 * since they coincide for the current action; confirm.
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	model_print("\nData race detected @ address %p:\n"
													"    Access 1: %5s in thread %2d @ clock %3u\n"
													"    Access 2: %5s in thread %2d @ clock %3u\n\n",
													race->address,
													race->isoldwrite ? "write" : "read",
													id_to_int(race->oldthread),
													race->oldclock,
													race->isnewwrite ? "write" : "read",
													id_to_int(race->newaction->get_tid()),
													race->newaction->get_seq_number()
													);
}
209
/** This function does race detection for a write on an expanded record.
 *
 * Checks the pending write by @a thread against every recorded reader and
 * against the last writer, then resets the record so this write becomes
 * the last (non-atomic) write with an empty read set.
 *
 * @param thread    The writing thread
 * @param location  Address being written (used only for the race report)
 * @param shadow    Shadow word holding a RaceRecord pointer
 * @param currClock Clock vector of @a thread (must be non-NULL)
 * @return Descriptor for the first race found, or NULL
 */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace; stop at the first one found. */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* This write supersedes all prior accesses: drop the read set and
	   stamp the record with the current (non-atomic) writer — done even
	   when a race was found, so detection continues past it. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
252
/** This function does race detection on a write.
 *
 * Entry point for a plain (non-atomic) write by @a thread to @a location.
 * Dispatches to the expanded-record path when the shadow word already
 * holds a RaceRecord pointer, expands the record when the thread id or
 * clock overflows the compact encoding, and otherwise checks the compact
 * word directly. Any race found is deduplicated by backtrace via raceset
 * before being reported.
 */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}



		{
			/* Check for datarace against last read. */

			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */

			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write: clears the read fields and the atomic bit. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		/* Only report a given backtrace once; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
320
321
/** This function does race detection for an atomic write on an expanded
 * record.
 *
 * Like fullRaceCheckWrite(), but skips all checks when the previous
 * access was itself atomic (atomics cannot race with atomics), and leaves
 * the record marked atomic afterwards.
 *
 * @return Descriptor for the first race found, or NULL
 */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Prior access was atomic — no race possible; still update the record. */
	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Reset the record: this atomic write is now the last access. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
367
/** This function does race detection on an atomic write.
 *
 * Entry point for an atomic write by @a thread to @a location. Mirrors
 * raceCheckWrite() but skips the checks entirely when the shadow word
 * already carries the atomic flag (atomic-vs-atomic cannot race), and the
 * updated shadow word is stamped with ATOMICMASK.
 */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */

			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */

			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write and mark it atomic. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
		/* Only report a given backtrace once; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
437
438 /** This function does race detection for a write on an expanded record. */
439 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
440         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
441         record->numReads = 0;
442         record->writeThread = thread;
443         modelclock_t ourClock = currClock->getClock(thread);
444         record->writeClock = ourClock;
445         record->isAtomic = 1;
446 }
447
448 /** This function does race detection for a write on an expanded record. */
449 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
450         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
451         record->numReads = 0;
452         record->writeThread = thread;
453         modelclock_t ourClock = currClock->getClock(thread);
454         record->writeClock = ourClock;
455         record->isAtomic = 0;
456 }
457
458 /** This function just updates metadata on atomic write. */
459 void recordWrite(thread_id_t thread, void *location) {
460         uint64_t *shadow = lookupAddressEntry(location);
461         uint64_t shadowval = *shadow;
462         ClockVector *currClock = get_execution()->get_cv(thread);
463         /* Do full record */
464         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
465                 fullRecordWrite(thread, location, shadow, currClock);
466                 return;
467         }
468
469         int threadid = id_to_int(thread);
470         modelclock_t ourClock = currClock->getClock(thread);
471
472         /* Thread ID is too large or clock is too large. */
473         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
474                 expandRecord(shadow);
475                 fullRecordWrite(thread, location, shadow, currClock);
476                 return;
477         }
478
479         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
480 }
481
482 /** This function just updates metadata on atomic write. */
483 void recordCalloc(void *location, size_t size) {
484         thread_id_t thread = thread_current()->get_id();
485         for(;size != 0;size--) {
486                 uint64_t *shadow = lookupAddressEntry(location);
487                 uint64_t shadowval = *shadow;
488                 ClockVector *currClock = get_execution()->get_cv(thread);
489                 /* Do full record */
490                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
491                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
492                         return;
493                 }
494
495                 int threadid = id_to_int(thread);
496                 modelclock_t ourClock = currClock->getClock(thread);
497
498                 /* Thread ID is too large or clock is too large. */
499                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
500                         expandRecord(shadow);
501                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
502                         return;
503                 }
504
505                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
506                 location = (void *)(((char *) location) + 1);
507         }
508 }
509
510
511
512 /** This function does race detection on a read for an expanded record. */
513 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
514 {
515         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
516         struct DataRace * race = NULL;
517         /* Check for datarace against last write. */
518
519         modelclock_t writeClock = record->writeClock;
520         thread_id_t writeThread = record->writeThread;
521
522         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
523                 /* We have a datarace */
524                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
525         }
526
527         /* Shorten vector when possible */
528
529         int copytoindex = 0;
530
531         for (int i = 0;i < record->numReads;i++) {
532                 modelclock_t readClock = record->readClock[i];
533                 thread_id_t readThread = record->thread[i];
534
535                 /*  Note that is not really a datarace check as reads cannot
536                                 actually race.  It is just determining that this read subsumes
537                                 another in the sense that either this read races or neither
538                                 read races. Note that readClock can't actually be zero, so it
539                                 could be optimized.  */
540
541                 if (clock_may_race(currClock, thread, readClock, readThread)) {
542                         /* Still need this read in vector */
543                         if (copytoindex != i) {
544                                 ASSERT(record->thread[i] >= 0);
545                                 record->readClock[copytoindex] = record->readClock[i];
546                                 record->thread[copytoindex] = record->thread[i];
547                         }
548                         copytoindex++;
549                 }
550         }
551
552         if (__builtin_popcount(copytoindex) <= 1) {
553                 if (copytoindex == 0) {
554                         int newCapacity = INITCAPACITY;
555                         record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
556                         record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
557                 } else if (copytoindex>=INITCAPACITY) {
558                         int newCapacity = copytoindex * 2;
559                         thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
560                         modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
561                         std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
562                         std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
563                         snapshot_free(record->readClock);
564                         snapshot_free(record->thread);
565                         record->readClock = newreadClock;
566                         record->thread = newthread;
567                 }
568         }
569
570         modelclock_t ourClock = currClock->getClock(thread);
571
572         ASSERT(thread >= 0);
573         record->thread[copytoindex] = thread;
574         record->readClock[copytoindex] = ourClock;
575         record->numReads = copytoindex + 1;
576         return race;
577 }
578
/** This function does race detection on a read.
 *
 * Entry point for a plain (non-atomic) read by @a thread of @a location.
 * Uses the expanded-record path when present (or when the compact encoding
 * overflows); otherwise checks the compact word against the last write,
 * then either merges this read into the word (when it subsumes the stored
 * read) or expands the record to track both reads.
 */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace; the goto below is a fall-through — the
			   read is still merged into the shadow word. */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record.
				   fullRaceCheckRead's return value is ignored here: its
				   write check runs against the same write already checked
				   above, so it cannot find a new race. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		/* Replace the stored read with ours; keep writer and atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		/* Only report a given backtrace once; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
641
642
643 /** This function does race detection on a read for an expanded record. */
644 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
645 {
646         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
647         struct DataRace * race = NULL;
648         /* Check for datarace against last write. */
649         if (record->isAtomic)
650                 return NULL;
651
652         modelclock_t writeClock = record->writeClock;
653         thread_id_t writeThread = record->writeThread;
654
655         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
656                 /* We have a datarace */
657                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
658         }
659         return race;
660 }
661
/** This function does race detection on an atomic read.
 *
 * Entry point for an atomic read by @a thread of @a location. Only checks
 * against the last write: reads cannot race with reads, and a write whose
 * atomic flag is set cannot race with an atomic read. The shadow state is
 * not updated by this function.
 */
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	/* Last access was atomic — no race possible. */
	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}


	}
Exit:
	if (race) {
		/* Only report a given backtrace once; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}