3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the multi-level shadow table mapping byte addresses to race metadata. */
static struct ShadowTable *root;
/* Bump-allocator arena for shadow tables; see table_calloc(). */
static void *memory_base;
static void *memory_top;
/* Set of distinct races seen so far, deduplicated by backtrace (race_hash/race_equals). */
static RaceSet * raceset;
/** Convenience accessor for the model-checker's current execution object. */
static const ModelExecution * get_execution()
	return model->get_execution();
/** This function initializes the data race detector. */
void initRaceDetector()
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	/* Pre-reserve a contiguous arena of base tables so most lookups avoid
	   individual snapshot allocations. */
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	raceset = new RaceSet();
/** Bump-allocates zeroed memory from the pre-reserved arena, falling back
 *  to snapshot_calloc() once the arena is exhausted. */
void * table_calloc(size_t size)
	if ((((char *)memory_base) + size) > memory_top) {
		/* Arena exhausted: fall back to the snapshotting allocator. */
		return snapshot_calloc(size, 1);
	void *tmp = memory_base;
	memory_base = ((char *)memory_base) + size;
/** This function looks up the entry in the shadow table corresponding to a
 * given address, lazily allocating intermediate tables on first touch.
 * Returns a pointer to the 64-bit shadow word tracking that byte. */
static uint64_t * lookupAddressEntry(const void *address)
	struct ShadowTable *currtable = root;
	/* Upper level: index by address bits 32-47; allocate on demand. */
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	/* Next level: index by bits 16-31 to find the base table of shadow words. */
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	/* The low 16 bits select the per-byte shadow word. */
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
/** Returns whether the last store to this byte was a plain (non-atomic)
 *  write; handles both the packed encoding and the expanded RaceRecord. */
bool hasNonAtomicStore(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		//Do we have a non atomic write with a non-zero clock
		return !(ATOMICMASK & shadowval);
	/* Expanded form: the shadow word stores a RaceRecord pointer. */
	struct RaceRecord *record = (struct RaceRecord *)shadowval;
	return !record->isAtomic;
/** Marks the last store to this byte as atomic so subsequent atomic
 *  accesses are not reported as racing with it. */
void setAtomicStoreFlag(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		*shadow = shadowval | ATOMICMASK;
	/* NOTE(review): branch structure is elided in this view; this path
	   appears to handle an empty shadow word by recording an atomic store
	   with zeroed thread/clock — confirm against full source. */
	*shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
	/* Expanded-record case: flag the RaceRecord itself. */
	struct RaceRecord *record = (struct RaceRecord *)shadowval;
/** Reports, via the out-parameters, the thread and clock of the last write
 *  recorded for an address. */
void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval) || shadowval == 0) {
		//Do we have a non atomic write with a non-zero clock
		*thread = WRTHREADID(shadowval);
		*clock = WRITEVECTOR(shadowval);
	/* Expanded form: read the write metadata from the RaceRecord. */
	struct RaceRecord *record = (struct RaceRecord *)shadowval;
	*thread = record->writeThread;
	*clock = record->writeClock;
108 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109 * to check the potential for a data race.
110 * @param clock1 The current clock vector
111 * @param tid1 The current thread; paired with clock1
112 * @param clock2 The clock value for the potentially-racing action
113 * @param tid2 The thread ID for the potentially-racing action
114 * @return true if the current clock allows a race with the event at clock2/tid2
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117 modelclock_t clock2, thread_id_t tid2)
119 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
/**
 * Expands a record from the compact form to the full form. This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps that do not fit in the packed 64-bit encoding.
 */
static void expandRecord(uint64_t *shadow)
	uint64_t shadowval = *shadow;

	/* Decode the packed read/write thread+clock fields. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	/* Only materialize the reader arrays if there was a recorded read. */
	if (readClock != 0) {
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	/* Carry the atomic flag over from the packed form. */
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	/* The shadow word now stores the pointer to the expanded record. */
	*shadow = (uint64_t) record;
/* Skip the innermost frames (race-reporting machinery) when hashing/comparing. */
#define FIRST_STACK_FRAME 2

/** Hashes a race's backtrace (above the reporting frames) so equivalent
 *  races can be deduplicated in the race set. */
unsigned int race_hash(struct DataRace *race) {
	unsigned int hash = 0;
	for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
		hash ^= ((uintptr_t)race->backtrace[i]);
		hash = (hash >> 3) | (hash << 29);	/* rotate right 3 (assumes 32-bit unsigned int) */
/** Compares two races for equality by backtrace (above the reporting frames). */
bool race_equals(struct DataRace *r1, struct DataRace *r2) {
	if (r1->numframes != r2->numframes)
	for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
		if (r1->backtrace[i] != r2->backtrace[i])
/** This function is called when we detect a data race. Allocates and fills
 *  a DataRace descriptor; the caller captures the backtrace and either
 *  keeps the race (raceset) or frees it as a duplicate. */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
	model_print("Race detected at location: \n");
	/* Symbolize the captured backtrace straight to the model-checker's output fd. */
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	model_print("\nData race detected @ address %p:\n"
" Access 1: %5s in thread %2d @ clock %3u\n"
" Access 2: %5s in thread %2d @ clock %3u\n\n",
	race->isoldwrite ? "write" : "read",
	id_to_int(race->oldthread),
	race->isnewwrite ? "write" : "read",
	id_to_int(race->newaction->get_tid()),
	race->newaction->get_seq_number()
/** This function does race detection for a write on an expanded record. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
		   optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Stamp this write: clear the reader list and record our thread/clock
	   as a plain (non-atomic) store. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
/** This function does race detection on a write. */
void raceCheckWrite(thread_id_t thread, void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;
	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Re-encode the packed shadow word with this (non-atomic) write. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** This function does race detection for an atomic write on an expanded record. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;
	/* Atomic write cannot race with a previous atomic access. */
	if (record->isAtomic)

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
		   optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Stamp this write: clear readers, record our thread/clock, mark atomic. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
/** This function does race detection on an atomic write. */
void atomraceCheckWrite(thread_id_t thread, void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;
	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);

	/* Can't race with atomic */
	if (shadowval & ATOMICMASK)

	/* Check for datarace against last read. */

	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Re-encode the packed shadow word; mark the store atomic. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
439 /** This function does race detection for a write on an expanded record. */
440 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
441 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
442 record->numReads = 0;
443 record->writeThread = thread;
444 modelclock_t ourClock = currClock->getClock(thread);
445 record->writeClock = ourClock;
446 record->isAtomic = 1;
449 /** This function does race detection for a write on an expanded record. */
450 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
451 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
452 record->numReads = 0;
453 record->writeThread = thread;
454 modelclock_t ourClock = currClock->getClock(thread);
455 record->writeClock = ourClock;
456 record->isAtomic = 0;
/** This function just updates metadata on atomic write. */
void recordWrite(thread_id_t thread, void *location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Already expanded: update the record in place. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		fullRecordWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		fullRecordWrite(thread, location, shadow, currClock);

	/* Packed encoding: record this atomic write. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/** Records a plain (non-atomic) write by the current thread for every byte
 *  of a freshly calloc'd region of the given size. */
void recordCalloc(void *location, size_t size) {
	thread_id_t thread = thread_current()->get_id();
	for(;size != 0;size--) {
		uint64_t *shadow = lookupAddressEntry(location);
		uint64_t shadowval = *shadow;
		ClockVector *currClock = get_execution()->get_cv(thread);
		/* Already expanded: update the record in place. */
		if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);

		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);

		/* Packed encoding: plain (non-atomic) write. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
		/* Advance to the next byte of the region. */
		location = (void *)(((char *) location) + 1);
/** This function does race detection on a read for an expanded record. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Shorten vector when possible */
	/* Compact the reader list in place, keeping only reads that this one
	   does not subsume (copytoindex is declared on an elided line). */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that this is not really a datarace check as reads cannot
		   actually race. It is just determining that this read subsumes
		   another in the sense that either this read races or neither
		   read races. Note that readClock can't actually be zero, so it
		   could be optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];

	/* Capacity management: when the surviving count is 0 or a power of two
	   (popcount <= 1), allocate or double the reader arrays. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0) {
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;

	/* Append this read to the (compacted) reader list. */
	modelclock_t ourClock = currClock->getClock(thread);

	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
/** This function does race detection on a read. */
void raceCheckRead(thread_id_t thread, const void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Does our read subsume the packed record's existing read? */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		fullRaceCheckRead(thread, location, shadow, currClock);

	/* Re-encode: our read replaces the old one; keep write info and atomic bit. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** This function does race detection on an atomic read for an expanded record. */
struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */
	/* Atomic read cannot race with a previous atomic write. */
	if (record->isAtomic)

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/** This function does race detection on an atomic read. */
void atomraceCheckRead(thread_id_t thread, const void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);

	/* Atomic read cannot race with a previous atomic access. */
	if (shadowval & ATOMICMASK)

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** First-byte pass of a multi-byte read check. Also reports, via old_val /
 *  new_val, the shadow word it saw and the word it wrote, so the remaining
 *  bytes can be fast-pathed when their shadow words match
 *  (see raceCheckRead_otherIt). */
static inline void raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Does our read subsume the packed record's existing read? */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		struct RaceRecord *record = (struct RaceRecord *) (*shadow);
		/* Slot 0 holds the pre-existing read; append ours at index 1. */
		record->thread[1] = thread;
		record->readClock[1] = ourClock;

	/* Re-encode, and hand back old/new shadow words for the fast path. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

	*old_val = shadowval;

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Per-byte pass for bytes 1..n-1 of a multi-byte read. Fast path: if this
 *  byte's shadow word equals the one the first byte saw, store the already
 *  computed updated word and skip the full check. */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location, uint64_t first_shadowval, uint64_t updated_shadowval)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	if (shadowval == first_shadowval) {
		*shadow = updated_shadowval;

	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Does our read subsume the packed record's existing read? */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		struct RaceRecord *record = (struct RaceRecord *) (*shadow);
		/* Slot 0 holds the pre-existing read; append ours at index 1. */
		record->thread[1] = thread;
		record->readClock[1] = ourClock;

	/* Re-encode: our read replaces the old one; keep write info and atomic bit. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
839 void raceCheckRead64(thread_id_t thread, const void *location)
841 uint64_t old_shadowval, new_shadowval;
842 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
844 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
845 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval);
846 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval);
847 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval);
848 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4), old_shadowval, new_shadowval);
849 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5), old_shadowval, new_shadowval);
850 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6), old_shadowval, new_shadowval);
851 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7), old_shadowval, new_shadowval);
854 void raceCheckRead32(thread_id_t thread, const void *location)
856 uint64_t old_shadowval, new_shadowval;
857 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
859 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
860 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval);
861 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval);
862 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval);
865 void raceCheckRead16(thread_id_t thread, const void *location)
867 uint64_t old_shadowval, new_shadowval;
868 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
870 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
871 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval);
874 void raceCheckRead8(thread_id_t thread, const void *location)
876 uint64_t old_shadowval, new_shadowval;
877 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
879 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** First-byte pass of a multi-byte write check. Also reports, via old_val /
 *  new_val, the shadow word it saw and the word it wrote, so the remaining
 *  bytes can be fast-pathed when their shadow words match
 *  (see raceCheckWrite_otherIt). */
static inline void raceCheckWrite_firstIt(thread_id_t thread, void * location, uint64_t *old_val, uint64_t *new_val)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Re-encode, and hand back old/new shadow words for the fast path. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);

	*old_val = shadowval;

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Per-byte pass for bytes 1..n-1 of a multi-byte write. Fast path: if this
 *  byte's shadow word equals the one the first byte saw, store the already
 *  computed updated word and skip the full check. */
static inline void raceCheckWrite_otherIt(thread_id_t thread, void * location, uint64_t first_shadowval, uint64_t updated_shadowval)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	if (shadowval == first_shadowval) {
		*shadow = updated_shadowval;

	ClockVector *currClock = get_execution()->get_cv(thread);
	/* No clock vector yet for this thread: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already an expanded record: take the slow path. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Re-encode the packed shadow word with this (non-atomic) write. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);

	/* Capture the backtrace; keep the race only if it is new, else free it. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
1017 void raceCheckWrite64(thread_id_t thread, void *location)
1019 uint64_t old_shadowval, new_shadowval;
1020 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1022 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1023 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval);
1024 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval);
1025 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval);
1026 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 4), old_shadowval, new_shadowval);
1027 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 5), old_shadowval, new_shadowval);
1028 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 6), old_shadowval, new_shadowval);
1029 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 7), old_shadowval, new_shadowval);
1032 void raceCheckWrite32(thread_id_t thread, void *location)
1034 uint64_t old_shadowval, new_shadowval;
1035 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1037 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1038 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval);
1039 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 2), old_shadowval, new_shadowval);
1040 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 3), old_shadowval, new_shadowval);
1043 void raceCheckWrite16(thread_id_t thread, void *location)
1045 uint64_t old_shadowval, new_shadowval;
1046 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1048 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1049 raceCheckWrite_otherIt(thread, (void *)(((uintptr_t)location) + 1), old_shadowval, new_shadowval);
1052 void raceCheckWrite8(thread_id_t thread, void *location)
1054 uint64_t old_shadowval, new_shadowval;
1055 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1057 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** Generates a byte-wise race-checking write entry point for the given
 *  access size in bits (e.g. RACECHECKWRITE(64) defines raceCheckWrite64):
 *  checks the first byte, then each remaining byte of the access.
 *  The helpers are raceCheckWrite_firstIt/raceCheckWrite_otherIt (note the
 *  capitalization), the loop must advance by the byte index i, and the
 *  shadow seeds use INVALIDSHADOWVAL to match the unrolled versions. */
#define RACECHECKWRITE(size) \
	void raceCheckWrite ## size(thread_id_t thread, void *location) { \
		uint64_t old_shadowval, new_shadowval; \
		old_shadowval = new_shadowval = INVALIDSHADOWVAL; \
		raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval); \
		for (int i = 1; i < size / 8; i++) { \
			raceCheckWrite_otherIt(thread, (void *)(((char *)location) + i), old_shadowval, new_shadowval); \
		} \
	}
/** Generates a byte-wise race-checking read entry point for the given
 *  access size in bits (e.g. RACECHECKREAD(64) defines raceCheckRead64):
 *  checks the first byte, then each remaining byte of the access.
 *  The helpers are raceCheckRead_firstIt/raceCheckRead_otherIt (note the
 *  capitalization), the location parameter is const-qualified to match the
 *  hand-written raceCheckRead* signatures, the loop must advance by the
 *  byte index i, and the shadow seeds use INVALIDSHADOWVAL to match the
 *  unrolled versions. */
#define RACECHECKREAD(size) \
	void raceCheckRead ## size(thread_id_t thread, const void *location) { \
		uint64_t old_shadowval, new_shadowval; \
		old_shadowval = new_shadowval = INVALIDSHADOWVAL; \
		raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval); \
		for (int i = 1; i < size / 8; i++) { \
			raceCheckRead_otherIt(thread, (const void *)(((const char *)location) + i), old_shadowval, new_shadowval); \
		} \
	}