3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
// Shadow-memory state for the data-race detector (one 64-bit shadow word per
// application byte, reached through a two-level table rooted here).
14 static struct ShadowTable *root;
// Bump-pointer arena that table_calloc() carves second-level tables from.
15 static void *memory_base;
16 static void *memory_top;
// Set of distinct races reported so far (deduplicated via race_hash/race_equals).
17 static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initializes the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
// Returns a pointer to the 64-bit shadow word for `address`, lazily
// allocating the intermediate tables on the way down.
// NOTE(review): braces and what appears to be a preprocessor guard for
// 48-bit addressing are elided between the numbered lines of this copy;
// confirm structure against the upstream file.
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
// Top level: indexed by address bits 47..32, allocated on demand.
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
// Second level: base table indexed by address bits 31..16, allocated on demand.
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
// Leaf: one shadow word per byte, indexed by the low 16 address bits.
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
// Returns whether the shadow entry for `address` records a non-atomic store.
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return !(ATOMICMASK & shadowval);
// Expanded-record path: consult the full RaceRecord's atomic flag.
// NOTE(review): the else-branch's structural lines (and any shadowval==0
// handling) are elided in this copy; verify against upstream.
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic;
// Marks the last store recorded at `address` as atomic.
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
82 *shadow = shadowval | ATOMICMASK;
// Empty entry: install a fresh compact record with only the atomic bit set.
85 *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
// Expanded record: presumably sets record->isAtomic = 1 here — the
// assignment line is elided in this copy; confirm against upstream.
88 struct RaceRecord *record = (struct RaceRecord *)shadowval;
// Reports, via the out-parameters, the thread id and clock of the last store
// recorded in the shadow entry for `address`.
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94 uint64_t * shadow = lookupAddressEntry(address);
95 uint64_t shadowval = *shadow;
// Compact (or empty) entry: decode writer fields from the packed word.
96 if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97 //Do we have a non atomic write with a non-zero clock
98 *thread = WRTHREADID(shadowval);
99 *clock = WRITEVECTOR(shadowval);
// Expanded record: read the writer info from the full RaceRecord.
101 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102 *thread = record->writeThread;
103 *clock = record->writeClock;
108 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109 * to check the potential for a data race.
110 * @param clock1 The current clock vector
111 * @param tid1 The current thread; paired with clock1
112 * @param clock2 The clock value for the potentially-racing action
113 * @param tid2 The thread ID for the potentially-racing action
114 * @return true if the current clock allows a race with the event at clock2/tid2
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117 modelclock_t clock2, thread_id_t tid2)
119 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
123 * Expands a record from the compact form to the full form. This is
124 * necessary for multiple readers or for very large thread ids or time
126 static void expandRecord(uint64_t *shadow)
128 uint64_t shadowval = *shadow;
130 modelclock_t readClock = READVECTOR(shadowval);
131 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132 modelclock_t writeClock = WRITEVECTOR(shadowval);
133 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
135 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136 record->writeThread = writeThread;
137 record->writeClock = writeClock;
139 if (readClock != 0) {
140 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142 record->numReads = 1;
143 ASSERT(readThread >= 0);
144 record->thread[0] = readThread;
145 record->readClock[0] = readClock;
147 if (shadowval & ATOMICMASK)
148 record->isAtomic = 1;
149 *shadow = (uint64_t) record;
152 #define FIRST_STACK_FRAME 2
154 unsigned int race_hash(struct DataRace *race) {
155 unsigned int hash = 0;
156 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157 hash ^= ((uintptr_t)race->backtrace[i]);
158 hash = (hash >> 3) | (hash << 29);
164 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
165 if (r1->numframes != r2->numframes)
167 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
168 if (r1->backtrace[i] != r2->backtrace[i])
174 /** This function is called when we detect a data race.*/
175 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
177 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
178 race->oldthread = oldthread;
179 race->oldclock = oldclock;
180 race->isoldwrite = isoldwrite;
181 race->newaction = newaction;
182 race->isnewwrite = isnewwrite;
183 race->address = address;
188 * @brief Assert a data race
190 * Asserts a data race which is currently realized, causing the execution to
191 * end and stashing a message in the model-checker's bug list
193 * @param race The race to report
195 void assert_race(struct DataRace *race)
// Print the captured stack trace, then describe the two conflicting accesses.
197 model_print("Race detected at location: \n");
198 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
199 model_print("\nData race detected @ address %p:\n"
200 " Access 1: %5s in thread %2d @ clock %3u\n"
201 " Access 2: %5s in thread %2d @ clock %3u\n\n",
// NOTE(review): some argument lines of this model_print call (presumably
// race->address and race->oldclock) are elided in this copy; confirm the
// argument list against upstream before editing.
203 race->isoldwrite ? "write" : "read",
204 id_to_int(race->oldthread),
206 race->isnewwrite ? "write" : "read",
207 id_to_int(race->newaction->get_tid()),
208 race->newaction->get_seq_number()
212 /** This function does race detection for a write on an expanded record. */
// Checks the new write by `thread` against every recorded read and the last
// write, then re-records the write (non-atomic) into the RaceRecord.
// NOTE(review): braces and goto/label/return lines between the numbered
// lines are elided in this copy; confirm control flow against upstream.
213 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
215 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
216 struct DataRace * race = NULL;
218 /* Check for datarace against last read. */
220 for (int i = 0;i < record->numReads;i++) {
221 modelclock_t readClock = record->readClock[i];
222 thread_id_t readThread = record->thread[i];
224 /* Note that readClock can't actually be zero here, so it could be
227 if (clock_may_race(currClock, thread, readClock, readThread)) {
228 /* We have a datarace */
229 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
234 /* Check for datarace against last write. */
237 modelclock_t writeClock = record->writeClock;
238 thread_id_t writeThread = record->writeThread;
240 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
241 /* We have a datarace */
242 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Record the new write: clear reads, mark non-atomic, stamp thread/clock.
247 record->numReads = 0;
248 record->writeThread = thread;
249 record->isAtomic = 0;
250 modelclock_t ourClock = currClock->getClock(thread);
251 record->writeClock = ourClock;
255 /** This function does race detection on a write. */
// Race-checks a plain (non-atomic) one-byte write at `location` by `thread`,
// then records the write into the shadow word.
// NOTE(review): braces, returns, gotos and labels between the numbered lines
// are elided in this copy; confirm control flow against upstream.
256 void raceCheckWrite(thread_id_t thread, void *location)
258 uint64_t *shadow = lookupAddressEntry(location);
259 uint64_t shadowval = *shadow;
260 ClockVector *currClock = get_execution()->get_cv(thread);
261 if (currClock == NULL)
// Expanded-record path: delegate to the full check.
264 struct DataRace * race = NULL;
266 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
267 race = fullRaceCheckWrite(thread, location, shadow, currClock);
272 int threadid = id_to_int(thread);
273 modelclock_t ourClock = currClock->getClock(thread);
275 /* Thread ID is too large or clock is too large. */
276 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
277 expandRecord(shadow);
278 race = fullRaceCheckWrite(thread, location, shadow, currClock);
285 /* Check for datarace against last read. */
286 modelclock_t readClock = READVECTOR(shadowval);
287 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
289 if (clock_may_race(currClock, thread, readClock, readThread)) {
290 /* We have a datarace */
291 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
297 /* Check for datarace against last write. */
299 modelclock_t writeClock = WRITEVECTOR(shadowval);
300 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
302 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
303 /* We have a datarace */
304 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Store the new write (thread, clock) compactly, clearing the read fields.
310 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// If a race was found: capture the backtrace and keep it only if it is a
// race not already in `raceset` (otherwise free the duplicate).
315 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
316 if (raceset->add(race))
318 else model_free(race);
323 /** This function does race detection for an atomic write on an expanded record. */
// Like fullRaceCheckWrite, but an atomic previous state cannot race (early
// bail when record->isAtomic) and the new write is marked atomic.
// NOTE(review): braces/goto/label/return lines are elided in this copy.
324 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
326 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
327 struct DataRace * race = NULL;
// Atomic-vs-atomic accesses never race; skip straight to recording.
329 if (record->isAtomic)
332 /* Check for datarace against last read. */
334 for (int i = 0;i < record->numReads;i++) {
335 modelclock_t readClock = record->readClock[i];
336 thread_id_t readThread = record->thread[i];
338 /* Note that readClock can't actually be zero here, so it could be
341 if (clock_may_race(currClock, thread, readClock, readThread)) {
342 /* We have a datarace */
343 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
348 /* Check for datarace against last write. */
351 modelclock_t writeClock = record->writeClock;
352 thread_id_t writeThread = record->writeThread;
354 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
355 /* We have a datarace */
356 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Record the new write as atomic.
361 record->numReads = 0;
362 record->writeThread = thread;
363 record->isAtomic = 1;
364 modelclock_t ourClock = currClock->getClock(thread);
365 record->writeClock = ourClock;
369 /** This function does race detection on an atomic write. */
// Race-checks an atomic one-byte write; mirrors raceCheckWrite but skips
// checks against a previous atomic access and records the write with the
// atomic bit set.
// NOTE(review): braces/returns/gotos between numbered lines are elided here.
370 void atomraceCheckWrite(thread_id_t thread, void *location)
372 uint64_t *shadow = lookupAddressEntry(location);
373 uint64_t shadowval = *shadow;
374 ClockVector *currClock = get_execution()->get_cv(thread);
375 if (currClock == NULL)
378 struct DataRace * race = NULL;
// Expanded-record path.
380 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
381 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
386 int threadid = id_to_int(thread);
387 modelclock_t ourClock = currClock->getClock(thread);
389 /* Thread ID is too large or clock is too large. */
390 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
391 expandRecord(shadow);
392 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
396 /* Can't race with atomic */
397 if (shadowval & ATOMICMASK)
401 /* Check for datarace against last read. */
403 modelclock_t readClock = READVECTOR(shadowval);
404 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
406 if (clock_may_race(currClock, thread, readClock, readThread)) {
407 /* We have a datarace */
408 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
414 /* Check for datarace against last write. */
416 modelclock_t writeClock = WRITEVECTOR(shadowval);
417 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
419 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
420 /* We have a datarace */
421 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Record the atomic write compactly.
427 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
// Deduplicate and report any race found above.
432 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
433 if (raceset->add(race))
435 else model_free(race);
439 /** This function just records an atomic write into an expanded record (no race detection). */
440 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
441 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
442 record->numReads = 0;
443 record->writeThread = thread;
444 modelclock_t ourClock = currClock->getClock(thread);
445 record->writeClock = ourClock;
446 record->isAtomic = 1;
449 /** This function just records a non-atomic write into an expanded record (no race detection). */
450 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
451 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
452 record->numReads = 0;
453 record->writeThread = thread;
454 modelclock_t ourClock = currClock->getClock(thread);
455 record->writeClock = ourClock;
456 record->isAtomic = 0;
459 /** This function just updates metadata on atomic write. */
460 void recordWrite(thread_id_t thread, void *location) {
461 uint64_t *shadow = lookupAddressEntry(location);
462 uint64_t shadowval = *shadow;
463 ClockVector *currClock = get_execution()->get_cv(thread);
// Expanded-record path: update the RaceRecord in place.
465 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
466 fullRecordWrite(thread, location, shadow, currClock);
470 int threadid = id_to_int(thread);
471 modelclock_t ourClock = currClock->getClock(thread);
473 /* Thread ID is too large or clock is too large. */
474 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
475 expandRecord(shadow);
476 fullRecordWrite(thread, location, shadow, currClock);
// Compact path: store the atomic write's thread/clock directly.
// NOTE(review): braces/returns between the numbered lines are elided here.
480 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
483 /** This function just updates metadata for each byte of a calloc'd region, recording non-atomic zero-initializing writes. */
// Records a non-atomic write for every byte in [location, location+size) on
// behalf of the current thread (used for zero-initialized allocations).
484 void recordCalloc(void *location, size_t size) {
485 thread_id_t thread = thread_current()->get_id();
486 for(;size != 0;size--) {
487 uint64_t *shadow = lookupAddressEntry(location);
488 uint64_t shadowval = *shadow;
489 ClockVector *currClock = get_execution()->get_cv(thread);
// Expanded-record path.
491 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
492 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
496 int threadid = id_to_int(thread);
497 modelclock_t ourClock = currClock->getClock(thread);
499 /* Thread ID is too large or clock is too large. */
500 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
501 expandRecord(shadow);
502 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
// Compact path: non-atomic write (no ATOMICMASK).
// NOTE(review): the loop's brace/continue structure between numbered lines
// is elided in this copy; verify against upstream.
506 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// Advance to the next byte of the region.
507 location = (void *)(((char *) location) + 1);
513 /** This function does race detection on a read for an expanded record. */
// Race-checks a read against the last write in an expanded record, compacts
// the read vector (dropping reads subsumed by this one), grows it when full,
// and appends the new read.
// NOTE(review): braces and several statements (e.g. the declaration of
// `copytoindex`, loop closers, capacity bookkeeping) are elided between the
// numbered lines of this copy; confirm against upstream.
514 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
516 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
517 struct DataRace * race = NULL;
518 /* Check for datarace against last write. */
520 modelclock_t writeClock = record->writeClock;
521 thread_id_t writeThread = record->writeThread;
523 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
524 /* We have a datarace */
525 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
528 /* Shorten vector when possible */
532 for (int i = 0;i < record->numReads;i++) {
533 modelclock_t readClock = record->readClock[i];
534 thread_id_t readThread = record->thread[i];
536 /* Note that this is not really a datarace check as reads cannot
537 actually race. It is just determining that this read subsumes
538 another in the sense that either this read races or neither
539 read races. Note that readClock can't actually be zero, so it
540 could be optimized. */
542 if (clock_may_race(currClock, thread, readClock, readThread)) {
543 /* Still need this read in vector */
544 if (copytoindex != i) {
545 ASSERT(record->thread[i] >= 0);
546 record->readClock[copytoindex] = record->readClock[i];
547 record->thread[copytoindex] = record->thread[i];
// `popcount <= 1` means copytoindex is 0 or a power of two, i.e. the vector
// is exactly at a capacity boundary and may need (re)allocation.
553 if (__builtin_popcount(copytoindex) <= 1) {
554 if (copytoindex == 0) {
555 int newCapacity = INITCAPACITY;
556 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
557 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
558 } else if (copytoindex>=INITCAPACITY) {
// Double the capacity, copy surviving reads, free the old arrays.
559 int newCapacity = copytoindex * 2;
560 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
561 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
562 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
563 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
564 snapshot_free(record->readClock);
565 snapshot_free(record->thread);
566 record->readClock = newreadClock;
567 record->thread = newthread;
// Append the current read.
571 modelclock_t ourClock = currClock->getClock(thread);
574 record->thread[copytoindex] = thread;
575 record->readClock[copytoindex] = ourClock;
576 record->numReads = copytoindex + 1;
580 /** This function does race detection on a read. */
// Race-checks a plain one-byte read at `location` by `thread`, then records
// the read into the shadow word (expanding the record when this read does
// not subsume the previously recorded one).
// NOTE(review): braces/returns/gotos between numbered lines are elided here.
581 void raceCheckRead(thread_id_t thread, const void *location)
583 uint64_t *shadow = lookupAddressEntry(location);
584 uint64_t shadowval = *shadow;
585 ClockVector *currClock = get_execution()->get_cv(thread);
586 if (currClock == NULL)
589 struct DataRace * race = NULL;
// Expanded-record path.
592 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
593 race = fullRaceCheckRead(thread, location, shadow, currClock);
598 int threadid = id_to_int(thread);
599 modelclock_t ourClock = currClock->getClock(thread);
601 /* Thread ID is too large or clock is too large. */
602 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
603 expandRecord(shadow);
604 race = fullRaceCheckRead(thread, location, shadow, currClock);
608 /* Check for datarace against last write. */
610 modelclock_t writeClock = WRITEVECTOR(shadowval);
611 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
613 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
614 /* We have a datarace */
615 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Does the previously packed read still matter, or does ours subsume it?
621 modelclock_t readClock = READVECTOR(shadowval);
622 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
624 if (clock_may_race(currClock, thread, readClock, readThread)) {
625 /* We don't subsume this read... Have to expand record. */
626 expandRecord(shadow);
627 fullRaceCheckRead(thread, location, shadow, currClock);
// Compact path: overwrite the packed read with ours, keeping writer+atomic bit.
632 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Deduplicate and report any race found above.
636 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
637 if (raceset->add(race))
639 else model_free(race);
644 /** This function does race detection on a read for an expanded record. */
// Race-checks an atomic read against the last write in an expanded record.
// Atomic-vs-atomic cannot race, so an atomic previous write bails out early.
// NOTE(review): braces/goto/return lines between numbered lines are elided.
645 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
647 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
648 struct DataRace * race = NULL;
649 /* Check for datarace against last write. */
650 if (record->isAtomic)
653 modelclock_t writeClock = record->writeClock;
654 thread_id_t writeThread = record->writeThread;
656 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
657 /* We have a datarace */
658 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
663 /** This function does race detection on a read. */
// Race-checks an atomic one-byte read: only a prior non-atomic write can
// race with it; the shadow word is not modified for atomic reads.
// NOTE(review): braces/returns between numbered lines are elided here.
664 void atomraceCheckRead(thread_id_t thread, const void *location)
666 uint64_t *shadow = lookupAddressEntry(location);
667 uint64_t shadowval = *shadow;
668 ClockVector *currClock = get_execution()->get_cv(thread);
669 if (currClock == NULL)
672 struct DataRace * race = NULL;
// Expanded-record path.
675 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
676 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
// A prior atomic access cannot race with an atomic read.
680 if (shadowval & ATOMICMASK)
684 /* Check for datarace against last write. */
686 modelclock_t writeClock = WRITEVECTOR(shadowval);
687 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
689 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
690 /* We have a datarace */
691 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Deduplicate and report any race found above.
699 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
700 if (raceset->add(race))
702 else model_free(race);
// First-byte iteration of a multi-byte read check. Performs the same work as
// raceCheckRead and additionally reports, via *old_val/*new_val, the shadow
// value seen and the value written, so the remaining bytes can take a
// fast path when their shadow words match. Returns the shadow pointer for
// the first byte (subsequent bytes use shadow + i on the fast path).
// NOTE(review): braces/returns/gotos and the assignment to *new_val between
// the numbered lines are elided in this copy; confirm against upstream.
706 static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
708 uint64_t *shadow = lookupAddressEntry(location);
709 uint64_t shadowval = *shadow;
711 ClockVector *currClock = get_execution()->get_cv(thread);
712 if (currClock == NULL)
715 struct DataRace * race = NULL;
// Expanded-record path.
718 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
719 race = fullRaceCheckRead(thread, location, shadow, currClock);
724 int threadid = id_to_int(thread);
725 modelclock_t ourClock = currClock->getClock(thread);
727 /* Thread ID is too large or clock is too large. */
728 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
729 expandRecord(shadow);
730 race = fullRaceCheckRead(thread, location, shadow, currClock);
734 /* Check for datarace against last write. */
736 modelclock_t writeClock = WRITEVECTOR(shadowval);
737 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
739 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
740 /* We have a datarace */
741 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Check whether our read subsumes the previously packed read.
744 modelclock_t readClock = READVECTOR(shadowval);
745 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
747 if (clock_may_race(currClock, thread, readClock, readThread)) {
748 /* We don't subsume this read... Have to expand record. */
749 expandRecord(shadow);
// After expansion the packed read occupies slot 0; add ours as slot 1.
750 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
751 record->thread[1] = thread;
752 record->readClock[1] = ourClock;
// Compact path: pack our read alongside the existing writer/atomic bit.
758 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
760 *old_val = shadowval;
// Deduplicate and report any race found above.
765 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
766 if (raceset->add(race))
768 else model_free(race);
// Subsequent-byte iteration of a multi-byte read check. When this byte's
// shadow word equals the value the first byte saw, the same updated value
// can be stored directly (fast path); otherwise the full per-byte read
// check is repeated.
// NOTE(review): the declaration/initialization of `shadow` (presumably from
// `_shadow` under `fast_path`) and braces/returns between the numbered lines
// are elided in this copy; confirm against upstream.
774 static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location, uint64_t first_shadowval, uint64_t updated_shadowval, uint64_t * _shadow, bool fast_path)
780 shadow = lookupAddressEntry(location);
782 uint64_t shadowval = *shadow;
// Fast path: identical shadow state to the first byte — reuse its result.
784 if (shadowval == first_shadowval) {
785 *shadow = updated_shadowval;
789 ClockVector *currClock = get_execution()->get_cv(thread);
790 if (currClock == NULL)
793 struct DataRace * race = NULL;
// Expanded-record path.
796 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
797 race = fullRaceCheckRead(thread, location, shadow, currClock);
802 int threadid = id_to_int(thread);
803 modelclock_t ourClock = currClock->getClock(thread);
805 /* Thread ID is too large or clock is too large. */
806 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
807 expandRecord(shadow);
808 race = fullRaceCheckRead(thread, location, shadow, currClock);
812 /* Check for datarace against last write. */
813 modelclock_t writeClock = WRITEVECTOR(shadowval);
814 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
816 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
817 /* We have a datarace */
818 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Check whether our read subsumes the previously packed read.
821 modelclock_t readClock = READVECTOR(shadowval);
822 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
824 if (clock_may_race(currClock, thread, readClock, readThread)) {
825 /* We don't subsume this read... Have to expand record. */
826 expandRecord(shadow);
827 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
828 record->thread[1] = thread;
829 record->readClock[1] = ourClock;
// Compact path: pack our read alongside the existing writer/atomic bit.
835 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Deduplicate and report any race found above.
839 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
840 if (raceset->add(race))
842 else model_free(race);
846 void raceCheckRead64(thread_id_t thread, const void *location)
848 uint64_t old_shadowval, new_shadowval;
849 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
850 bool fast_path = CHECKBOUNDARY(location, 7);
852 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
853 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
854 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
855 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
856 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4), old_shadowval, new_shadowval, shadow + 4, fast_path);
857 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5), old_shadowval, new_shadowval, shadow + 5, fast_path);
858 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6), old_shadowval, new_shadowval, shadow + 6, fast_path);
859 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7), old_shadowval, new_shadowval, shadow + 7, fast_path);
862 void raceCheckRead32(thread_id_t thread, const void *location)
864 uint64_t old_shadowval, new_shadowval;
865 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
866 bool fast_path = CHECKBOUNDARY(location, 3);
868 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
869 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
870 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
871 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
874 void raceCheckRead16(thread_id_t thread, const void *location)
876 uint64_t old_shadowval, new_shadowval;
877 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
878 bool fast_path = CHECKBOUNDARY(location, 1);
880 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
881 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
884 void raceCheckRead8(thread_id_t thread, const void *location)
886 uint64_t old_shadowval, new_shadowval;
887 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
889 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
// First-byte iteration of a multi-byte write check. Same work as
// raceCheckWrite, additionally reporting via *old_val/*new_val the shadow
// value seen and written so later bytes can take a fast path. Returns the
// first byte's shadow pointer.
// NOTE(review): braces/returns/gotos and the assignment to *new_val between
// the numbered lines are elided in this copy; confirm against upstream.
892 static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, void * location, uint64_t *old_val, uint64_t *new_val)
894 uint64_t *shadow = lookupAddressEntry(location);
895 uint64_t shadowval = *shadow;
896 ClockVector *currClock = get_execution()->get_cv(thread);
897 if (currClock == NULL)
900 struct DataRace * race = NULL;
// Expanded-record path.
902 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
903 race = fullRaceCheckWrite(thread, location, shadow, currClock);
908 int threadid = id_to_int(thread);
909 modelclock_t ourClock = currClock->getClock(thread);
911 /* Thread ID is too large or clock is too large. */
912 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
913 expandRecord(shadow);
914 race = fullRaceCheckWrite(thread, location, shadow, currClock);
919 /* Check for datarace against last read. */
920 modelclock_t readClock = READVECTOR(shadowval);
921 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
923 if (clock_may_race(currClock, thread, readClock, readThread)) {
924 /* We have a datarace */
925 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
931 /* Check for datarace against last write. */
932 modelclock_t writeClock = WRITEVECTOR(shadowval);
933 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
935 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
936 /* We have a datarace */
937 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Compact path: record the new write, clearing read fields.
943 *shadow = ENCODEOP(0, 0, threadid, ourClock);
945 *old_val = shadowval;
// Deduplicate and report any race found above.
951 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
952 if (raceset->add(race))
954 else model_free(race);
// Subsequent-byte iteration of a multi-byte write check. When this byte's
// shadow word equals the value the first byte saw, the same updated value is
// stored directly (fast path); otherwise the full per-byte write check runs.
// NOTE(review): the declaration/initialization of `shadow` (presumably from
// `_shadow` under `fast_path`) and braces/returns between the numbered lines
// are elided in this copy; confirm against upstream.
960 static inline void raceCheckWrite_otherIt(thread_id_t thread, void * location, uint64_t first_shadowval, uint64_t updated_shadowval, uint64_t * _shadow, bool fast_path)
966 shadow = lookupAddressEntry(location);
968 uint64_t shadowval = *shadow;
// Fast path: identical shadow state to the first byte — reuse its result.
970 if (shadowval == first_shadowval) {
971 *shadow = updated_shadowval;
975 ClockVector *currClock = get_execution()->get_cv(thread);
976 if (currClock == NULL)
979 struct DataRace * race = NULL;
// Expanded-record path.
981 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
982 race = fullRaceCheckWrite(thread, location, shadow, currClock);
987 int threadid = id_to_int(thread);
988 modelclock_t ourClock = currClock->getClock(thread);
990 /* Thread ID is too large or clock is too large. */
991 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
992 expandRecord(shadow);
993 race = fullRaceCheckWrite(thread, location, shadow, currClock);
998 /* Check for datarace against last read. */
999 modelclock_t readClock = READVECTOR(shadowval);
1000 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1002 if (clock_may_race(currClock, thread, readClock, readThread)) {
1003 /* We have a datarace */
1004 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1010 /* Check for datarace against last write. */
1011 modelclock_t writeClock = WRITEVECTOR(shadowval);
1012 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1014 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1015 /* We have a datarace */
1016 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Compact path: record the new write, clearing read fields.
1022 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// Deduplicate and report any race found above.
1027 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1028 if (raceset->add(race))
1030 else model_free(race);
1034 void raceCheckWrite64(thread_id_t thread, void *location)
1036 uint64_t old_shadowval, new_shadowval;
1037 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1038 bool fast_path = CHECKBOUNDARY(location, 7);
1040 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1041 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
1042 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
1043 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
1044 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 4), old_shadowval, new_shadowval, shadow + 4, fast_path);
1045 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 5), old_shadowval, new_shadowval, shadow + 5, fast_path);
1046 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 6), old_shadowval, new_shadowval, shadow + 6, fast_path);
1047 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 7), old_shadowval, new_shadowval, shadow + 7, fast_path);
1050 void raceCheckWrite32(thread_id_t thread, void *location)
1052 uint64_t old_shadowval, new_shadowval;
1053 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1054 bool fast_path = CHECKBOUNDARY(location, 3);
1056 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1057 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
1058 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval, shadow + 2, fast_path);
1059 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval, shadow + 3, fast_path);
1062 void raceCheckWrite16(thread_id_t thread, void *location)
1064 uint64_t old_shadowval, new_shadowval;
1065 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1066 bool fast_path = CHECKBOUNDARY(location, 1);
1068 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1069 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval, shadow + 1, fast_path);
1072 void raceCheckWrite8(thread_id_t thread, void *location)
1074 uint64_t old_shadowval, new_shadowval;
1075 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1077 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);