3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
// Root of the two-level shadow table mapping application addresses to
// 64-bit shadow words (one word per byte of application memory).
14 static struct ShadowTable *root;
// Bump-allocation arena used by table_calloc(): next free byte and arena end.
15 static void *memory_base;
16 static void *memory_top;
// Races reported so far; used to suppress duplicate reports (compared by
// backtrace — see race_equals/race_hash).
17 static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
/** Initializes the data race detector: allocates the root shadow table,
 *  reserves the bump-allocation arena used by table_calloc(), and creates
 *  the set used to deduplicate reported races.  Call before any race checks. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
/** Zeroed allocation for shadow-table nodes.  Bump-allocates from the
 *  preallocated arena [memory_base, memory_top); falls back to
 *  snapshot_calloc() once the arena is exhausted. */
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
// Arena fast path: hand out the current base and advance it.
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
/** Looks up (creating levels on demand) the 64-bit shadow-table entry for
 *  an application @a address.  The table is a two-level trie keyed on
 *  16-bit slices of the address; the low 16 bits index the base table. */
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
// First level: address bits [47:32].
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
// Second level: address bits [31:16] select the base table of shadow words.
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return !(ATOMICMASK & shadowval);
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic;
/** Marks the store recorded at @a address as atomic.  Handles all three
 *  shadow-word encodings: packed short record, empty word, expanded record. */
79 void setAtomicStoreFlag(const void *address) {
80 uint64_t * shadow = lookupAddressEntry(address);
81 uint64_t shadowval = *shadow;
82 if (ISSHORTRECORD(shadowval)) {
83 *shadow = shadowval | ATOMICMASK;
// Empty shadow word: install a fresh short record carrying only the atomic flag.
85 *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
// Expanded record: flag the RaceRecord itself.
88 struct RaceRecord *record = (struct RaceRecord *)shadowval;
/** Retrieves the thread and clock of the last store to @a address.
 *  A zero shadow word decodes as thread 0 / clock 0 (no store yet). */
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94 uint64_t * shadow = lookupAddressEntry(address);
95 uint64_t shadowval = *shadow;
96 if (ISSHORTRECORD(shadowval) || shadowval == 0) {
// Packed (or empty) shadow word: decode the writer fields directly.
98 *thread = WRTHREADID(shadowval);
99 *clock = WRITEVECTOR(shadowval);
// Expanded record: read the stored writer thread/clock.
101 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102 *thread = record->writeThread;
103 *clock = record->writeClock;
108 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109 * to check the potential for a data race.
110 * @param clock1 The current clock vector
111 * @param tid1 The current thread; paired with clock1
112 * @param clock2 The clock value for the potentially-racing action
113 * @param tid2 The thread ID for the potentially-racing action
114 * @return true if the current clock allows a race with the event at clock2/tid2
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117 modelclock_t clock2, thread_id_t tid2)
119 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps that no longer fit in the packed shadow word.
 */
126 static void expandRecord(uint64_t *shadow)
128 uint64_t shadowval = *shadow;
// Decode the packed reader/writer fields from the short record.
130 modelclock_t readClock = READVECTOR(shadowval);
131 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132 modelclock_t writeClock = WRITEVECTOR(shadowval);
133 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
135 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136 record->writeThread = writeThread;
137 record->writeClock = writeClock;
// A non-zero read clock means the short record tracked one reader; seed
// the expanded reader arrays with that single entry.
139 if (readClock != 0) {
140 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142 record->numReads = 1;
143 ASSERT(readThread >= 0);
144 record->thread[0] = readThread;
145 record->readClock[0] = readClock;
147 record->thread = NULL;
// Carry the atomic flag over from the packed encoding.
149 if (shadowval & ATOMICMASK)
150 record->isAtomic = 1;
// The shadow word now stores the pointer to the expanded record.
151 *shadow = (uint64_t) record;
154 #define FIRST_STACK_FRAME 2
156 unsigned int race_hash(struct DataRace *race) {
157 unsigned int hash = 0;
158 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
159 hash ^= ((uintptr_t)race->backtrace[i]);
160 hash = (hash >> 3) | (hash << 29);
165 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
166 if (r1->numframes != r2->numframes)
168 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
169 if (r1->backtrace[i] != r2->backtrace[i])
/** Allocates and fills a DataRace descriptor for a race between a
 *  recorded access (oldthread/oldclock/isoldwrite) and the current action.
 *  Caller owns the returned model_malloc'd memory (freed via model_free
 *  when the race turns out to be a duplicate). */
176 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
178 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
179 race->oldthread = oldthread;
180 race->oldclock = oldclock;
181 race->isoldwrite = isoldwrite;
182 race->newaction = newaction;
183 race->isnewwrite = isnewwrite;
184 race->address = address;
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
196 void assert_race(struct DataRace *race)
198 model_print("Race detected at location: \n");
// Symbolize the captured backtrace straight to the model-checker's output fd.
199 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
200 model_print("\nData race detected @ address %p:\n"
201 " Access 1: %5s in thread %2d @ clock %3u\n"
202 " Access 2: %5s in thread %2d @ clock %3u\n\n",
204 race->isoldwrite ? "write" : "read",
205 id_to_int(race->oldthread),
207 race->isnewwrite ? "write" : "read",
208 id_to_int(race->newaction->get_tid()),
209 race->newaction->get_seq_number()
/** Race-checks a non-atomic write against an expanded record.  Returns a
 *  DataRace descriptor (caller reports/frees) or NULL if no race. */
214 struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
216 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
217 struct DataRace * race = NULL;
219 /* Check for datarace against last read. */
221 for (int i = 0;i < record->numReads;i++) {
222 modelclock_t readClock = record->readClock[i];
223 thread_id_t readThread = record->thread[i];
225 /* Note that readClock can't actually be zero here, so it could be optimized. */
228 if (clock_may_race(currClock, thread, readClock, readThread)) {
229 /* We have a datarace */
230 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
235 /* Check for datarace against last write. */
237 modelclock_t writeClock = record->writeClock;
238 thread_id_t writeThread = record->writeThread;
240 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
241 /* We have a datarace */
242 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// This write supersedes all recorded reads and the previous write:
// reset the record to a single non-atomic write by this thread.
247 record->numReads = 0;
248 record->writeThread = thread;
249 record->isAtomic = 0;
250 modelclock_t ourClock = currClock->getClock(thread);
251 record->writeClock = ourClock;
/** Race-checks a non-atomic write at @a location and updates the shadow
 *  word.  Expands to a full record when thread id/clock exceed the packed
 *  field widths; reports at most one (deduplicated) race per call. */
256 void raceCheckWrite(thread_id_t thread, void *location)
258 uint64_t *shadow = lookupAddressEntry(location);
259 uint64_t shadowval = *shadow;
260 ClockVector *currClock = get_execution()->get_cv(thread);
261 if (currClock == NULL)
264 struct DataRace * race = NULL;
// A non-zero, non-short shadow word is a pointer to an expanded record.
266 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
267 race = fullRaceCheckWrite(thread, location, shadow, currClock);
272 int threadid = id_to_int(thread);
273 modelclock_t ourClock = currClock->getClock(thread);
275 /* Thread ID is too large or clock is too large. */
276 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
277 expandRecord(shadow);
278 race = fullRaceCheckWrite(thread, location, shadow, currClock);
283 /* Check for datarace against last read. */
284 modelclock_t readClock = READVECTOR(shadowval);
285 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
287 if (clock_may_race(currClock, thread, readClock, readThread)) {
288 /* We have a datarace */
289 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
295 /* Check for datarace against last write. */
296 modelclock_t writeClock = WRITEVECTOR(shadowval);
297 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
299 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
300 /* We have a datarace */
301 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Install this write as the new short record (clears reader fields,
// drops the atomic flag).
307 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// On a race: capture the backtrace; model_free the descriptor if an
// identical race (same backtrace) was already recorded in raceset.
312 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
313 if (raceset->add(race))
315 else model_free(race);
/** Race-checks an ATOMIC write against an expanded record; marks the
 *  record atomic afterwards.  Returns a DataRace or NULL. */
320 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
322 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
323 struct DataRace * race = NULL;
// An atomic previous write cannot race with this atomic write; the
// read/write checks below are skipped in that case.
325 if (record->isAtomic)
328 /* Check for datarace against last read. */
330 for (int i = 0;i < record->numReads;i++) {
331 modelclock_t readClock = record->readClock[i];
332 thread_id_t readThread = record->thread[i];
334 /* Note that readClock can't actually be zero here, so it could be optimized. */
337 if (clock_may_race(currClock, thread, readClock, readThread)) {
338 /* We have a datarace */
339 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
344 /* Check for datarace against last write. */
347 modelclock_t writeClock = record->writeClock;
348 thread_id_t writeThread = record->writeThread;
350 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
351 /* We have a datarace */
352 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Reset to a single ATOMIC write by this thread (note isAtomic = 1,
// unlike fullRaceCheckWrite).
357 record->numReads = 0;
358 record->writeThread = thread;
359 record->isAtomic = 1;
360 modelclock_t ourClock = currClock->getClock(thread);
361 record->writeClock = ourClock;
/** Race-checks an atomic write at @a location and updates the shadow word
 *  (short-record path sets ATOMICMASK). */
366 void atomraceCheckWrite(thread_id_t thread, void *location)
368 uint64_t *shadow = lookupAddressEntry(location);
369 uint64_t shadowval = *shadow;
370 ClockVector *currClock = get_execution()->get_cv(thread);
371 if (currClock == NULL)
374 struct DataRace * race = NULL;
// Expanded-record path.
376 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
377 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
382 int threadid = id_to_int(thread);
383 modelclock_t ourClock = currClock->getClock(thread);
385 /* Thread ID is too large or clock is too large. */
386 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
387 expandRecord(shadow);
388 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
392 /* Can't race with atomic */
393 if (shadowval & ATOMICMASK)
397 /* Check for datarace against last read. */
398 modelclock_t readClock = READVECTOR(shadowval);
399 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
401 if (clock_may_race(currClock, thread, readClock, readThread)) {
402 /* We have a datarace */
403 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
409 /* Check for datarace against last write. */
410 modelclock_t writeClock = WRITEVECTOR(shadowval);
411 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
413 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
414 /* We have a datarace */
415 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Install this atomic write as the new short record.
421 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
// Backtrace capture + deduplicated reporting, as in raceCheckWrite.
426 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
427 if (raceset->add(race))
429 else model_free(race);
/** Updates an expanded record for an ATOMIC write — metadata only, no
 *  race detection (despite the neighborhood of race-checking helpers). */
434 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
435 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
436 record->numReads = 0;
437 record->writeThread = thread;
438 modelclock_t ourClock = currClock->getClock(thread);
439 record->writeClock = ourClock;
440 record->isAtomic = 1;
/** Updates an expanded record for a NON-atomic write — metadata only, no
 *  race detection. */
444 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
445 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
446 record->numReads = 0;
447 record->writeThread = thread;
448 modelclock_t ourClock = currClock->getClock(thread);
449 record->writeClock = ourClock;
450 record->isAtomic = 0;
/** This function just updates metadata on atomic write. */
454 void recordWrite(thread_id_t thread, void *location) {
455 uint64_t *shadow = lookupAddressEntry(location);
456 uint64_t shadowval = *shadow;
457 ClockVector *currClock = get_execution()->get_cv(thread);
459 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
460 fullRecordWrite(thread, location, shadow, currClock);
464 int threadid = id_to_int(thread);
465 modelclock_t ourClock = currClock->getClock(thread);
467 /* Thread ID is too large or clock is too large. */
468 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
469 expandRecord(shadow);
470 fullRecordWrite(thread, location, shadow, currClock);
474 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/** Records a calloc'd region: marks each of the @a size bytes starting at
 *  @a location as last written (non-atomically) by the current thread,
 *  without performing race detection. */
478 void recordCalloc(void *location, size_t size) {
479 thread_id_t thread = thread_current()->get_id();
// One shadow word per byte of the zeroed region.
480 for(;size != 0;size--) {
481 uint64_t *shadow = lookupAddressEntry(location);
482 uint64_t shadowval = *shadow;
483 ClockVector *currClock = get_execution()->get_cv(thread);
485 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
486 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
490 int threadid = id_to_int(thread);
491 modelclock_t ourClock = currClock->getClock(thread);
493 /* Thread ID is too large or clock is too large. */
494 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
495 expandRecord(shadow);
496 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
500 *shadow = ENCODEOP(0, 0, threadid, ourClock);
501 location = (void *)(((char *) location) + 1);
/** Race-checks a read against an expanded record, compacts the recorded
 *  read vector, grows it if needed, and appends this read.  Returns a
 *  DataRace or NULL. */
506 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
508 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
509 struct DataRace * race = NULL;
510 /* Check for datarace against last write. */
512 modelclock_t writeClock = record->writeClock;
513 thread_id_t writeThread = record->writeThread;
515 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
516 /* We have a datarace */
517 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
520 /* Shorten vector when possible */
// Compact in place: keep only reads not already subsumed by this one.
524 for (int i = 0;i < record->numReads;i++) {
525 modelclock_t readClock = record->readClock[i];
526 thread_id_t readThread = record->thread[i];
528 /* Note that is not really a datarace check as reads cannot
529 actually race. It is just determining that this read subsumes
530 another in the sense that either this read races or neither
531 read races. Note that readClock can't actually be zero, so it
532 could be optimized. */
534 if (clock_may_race(currClock, thread, readClock, readThread)) {
535 /* Still need this read in vector */
536 if (copytoindex != i) {
537 ASSERT(record->thread[i] >= 0);
538 record->readClock[copytoindex] = record->readClock[i];
539 record->thread[copytoindex] = record->thread[i];
// popcount <= 1 means copytoindex is 0 or an exact power of two — the
// only sizes at which the doubling arrays may need (re)allocation.
545 if (__builtin_popcount(copytoindex) <= 1) {
546 if (copytoindex == 0 && record->thread == NULL) {
547 int newCapacity = INITCAPACITY;
548 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
549 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
550 } else if (copytoindex>=INITCAPACITY) {
551 int newCapacity = copytoindex * 2;
552 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
553 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
554 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
555 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
556 snapshot_free(record->readClock);
557 snapshot_free(record->thread);
558 record->readClock = newreadClock;
559 record->thread = newthread;
// Append the current read after the surviving entries.
563 modelclock_t ourClock = currClock->getClock(thread);
566 record->thread[copytoindex] = thread;
567 record->readClock[copytoindex] = ourClock;
568 record->numReads = copytoindex + 1;
/** Race-checks a non-atomic read at @a location and updates the shadow
 *  word; expands the record when the packed encoding cannot represent the
 *  reader set or field widths. */
573 void raceCheckRead(thread_id_t thread, const void *location)
575 uint64_t *shadow = lookupAddressEntry(location);
576 uint64_t shadowval = *shadow;
577 ClockVector *currClock = get_execution()->get_cv(thread);
578 if (currClock == NULL)
581 struct DataRace * race = NULL;
// Expanded-record path.
584 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
585 race = fullRaceCheckRead(thread, location, shadow, currClock);
590 int threadid = id_to_int(thread);
591 modelclock_t ourClock = currClock->getClock(thread);
593 /* Thread ID is too large or clock is too large. */
594 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
595 expandRecord(shadow);
596 race = fullRaceCheckRead(thread, location, shadow, currClock);
600 /* Check for datarace against last write. */
602 modelclock_t writeClock = WRITEVECTOR(shadowval);
603 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
605 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
606 /* We have a datarace */
607 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// If the previously-recorded read is not subsumed by this one, the short
// record cannot hold both readers — expand.
613 modelclock_t readClock = READVECTOR(shadowval);
614 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
616 if (clock_may_race(currClock, thread, readClock, readThread)) {
617 /* We don't subsume this read... Have to expand record. */
618 expandRecord(shadow);
619 fullRaceCheckRead(thread, location, shadow, currClock);
// Re-pack: this read plus the prior write, preserving the atomic flag.
624 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Backtrace capture + deduplicated reporting.
628 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
629 if (raceset->add(race))
631 else model_free(race);
/** Race-checks an atomic read against an expanded record: only the last
 *  write can race, and not if it was itself atomic.  Returns a DataRace
 *  or NULL.  Does not modify the record. */
637 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
639 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
640 struct DataRace * race = NULL;
641 /* Check for datarace against last write. */
// Atomic-vs-atomic accesses never race.
642 if (record->isAtomic)
645 modelclock_t writeClock = record->writeClock;
646 thread_id_t writeThread = record->writeThread;
648 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
649 /* We have a datarace */
650 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/** Race-checks an atomic read at @a location.  The shadow word is not
 *  updated — only writes change the recorded state for atomic accesses. */
656 void atomraceCheckRead(thread_id_t thread, const void *location)
658 uint64_t *shadow = lookupAddressEntry(location);
659 uint64_t shadowval = *shadow;
660 ClockVector *currClock = get_execution()->get_cv(thread);
661 if (currClock == NULL)
664 struct DataRace * race = NULL;
// Expanded-record path.
667 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
668 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
// A short record flagged atomic cannot race with this atomic read.
672 if (shadowval & ATOMICMASK)
676 /* Check for datarace against last write. */
677 modelclock_t writeClock = WRITEVECTOR(shadowval);
678 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
680 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
681 /* We have a datarace */
682 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Backtrace capture + deduplicated reporting.
690 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
691 if (raceset->add(race))
693 else model_free(race);
/** First-byte iteration of a multi-byte read check.  Performs the same
 *  logic as raceCheckRead, but additionally returns the shadow pointer
 *  and exports the old/new shadow-word values through @a old_val /
 *  @a new_val so raceCheckRead{16,32,64} can bulk-update neighboring
 *  bytes whose shadow words are identical. */
699 static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
699 uint64_t *shadow = lookupAddressEntry(location);
700 uint64_t shadowval = *shadow;
702 ClockVector *currClock = get_execution()->get_cv(thread);
703 if (currClock == NULL)
706 struct DataRace * race = NULL;
// Expanded-record path.
709 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
710 race = fullRaceCheckRead(thread, location, shadow, currClock);
715 int threadid = id_to_int(thread);
716 modelclock_t ourClock = currClock->getClock(thread);
718 /* Thread ID is too large or clock is too large. */
719 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
720 expandRecord(shadow);
721 race = fullRaceCheckRead(thread, location, shadow, currClock);
725 /* Check for datarace against last write. */
726 modelclock_t writeClock = WRITEVECTOR(shadowval);
727 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
729 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
730 /* We have a datarace */
731 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
734 modelclock_t readClock = READVECTOR(shadowval);
735 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
737 if (clock_may_race(currClock, thread, readClock, readThread)) {
738 /* We don't subsume this read... Have to expand record. */
739 expandRecord(shadow);
// expandRecord seeded slot 0 with the short record's reader; this read
// goes directly into slot 1.
740 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
741 record->thread[1] = thread;
742 record->readClock[1] = ourClock;
// Re-pack this read plus the prior write, preserving the atomic flag.
748 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Export the pre-update word so callers can match neighboring bytes.
750 *old_val = shadowval;
// Backtrace capture + deduplicated reporting.
755 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
756 if (raceset->add(race))
758 else model_free(race);
/** Per-byte read check for the remaining bytes of a multi-byte access;
 *  same logic as raceCheckRead_firstIt but returns nothing. */
764 static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
765 uint64_t *shadow = lookupAddressEntry(location);
767 uint64_t shadowval = *shadow;
769 ClockVector *currClock = get_execution()->get_cv(thread);
770 if (currClock == NULL)
773 struct DataRace * race = NULL;
// Expanded-record path.
776 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
777 race = fullRaceCheckRead(thread, location, shadow, currClock);
782 int threadid = id_to_int(thread);
783 modelclock_t ourClock = currClock->getClock(thread);
785 /* Thread ID is too large or clock is too large. */
786 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
787 expandRecord(shadow);
788 race = fullRaceCheckRead(thread, location, shadow, currClock);
792 /* Check for datarace against last write. */
793 modelclock_t writeClock = WRITEVECTOR(shadowval);
794 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
796 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
797 /* We have a datarace */
798 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
801 modelclock_t readClock = READVECTOR(shadowval);
802 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
804 if (clock_may_race(currClock, thread, readClock, readThread)) {
805 /* We don't subsume this read... Have to expand record. */
806 expandRecord(shadow);
807 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
808 record->thread[1] = thread;
809 record->readClock[1] = ourClock;
815 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Backtrace capture + deduplicated reporting.
819 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
820 if (raceset->add(race))
822 else model_free(race);
/** Race-checks an 8-byte read.  Fully checks the first byte; when the
 *  access stays inside one boundary (CHECKBOUNDARY — presumably all bytes
 *  share a shadow base-table row, TODO confirm), neighboring bytes whose
 *  shadow word equals the first byte's pre-update word are bulk-updated;
 *  otherwise each remaining byte gets its own full check. */
826 void raceCheckRead64(thread_id_t thread, const void *location)
828 uint64_t old_shadowval, new_shadowval;
829 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
831 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
832 if (CHECKBOUNDARY(location, 7)) {
833 if (shadow[1]==old_shadowval)
834 shadow[1] = new_shadowval;
836 if (shadow[2]==old_shadowval)
837 shadow[2] = new_shadowval;
839 if (shadow[3]==old_shadowval)
840 shadow[3] = new_shadowval;
842 if (shadow[4]==old_shadowval)
843 shadow[4] = new_shadowval;
845 if (shadow[5]==old_shadowval)
846 shadow[5] = new_shadowval;
848 if (shadow[6]==old_shadowval)
849 shadow[6] = new_shadowval;
851 if (shadow[7]==old_shadowval)
852 shadow[7] = new_shadowval;
// Slow path: per-byte checks for bytes 1..7.
858 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
860 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
862 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
864 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
866 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
868 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
870 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** Race-checks a 4-byte read; same fast/slow-path scheme as raceCheckRead64. */
873 void raceCheckRead32(thread_id_t thread, const void *location)
875 uint64_t old_shadowval, new_shadowval;
876 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
878 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
879 if (CHECKBOUNDARY(location, 3)) {
880 if (shadow[1]==old_shadowval)
881 shadow[1] = new_shadowval;
883 if (shadow[2]==old_shadowval)
884 shadow[2] = new_shadowval;
886 if (shadow[3]==old_shadowval)
887 shadow[3] = new_shadowval;
// Slow path: per-byte checks for bytes 1..3.
893 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
895 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
897 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** Race-checks a 2-byte read; same fast/slow-path scheme. */
900 void raceCheckRead16(thread_id_t thread, const void *location)
902 uint64_t old_shadowval, new_shadowval;
903 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
906 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
907 if (CHECKBOUNDARY(location, 1)) {
908 if (shadow[1]==old_shadowval) {
909 shadow[1] = new_shadowval;
913 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Race-checks a 1-byte read (single iteration, no neighbors). */
916 void raceCheckRead8(thread_id_t thread, const void *location)
918 uint64_t old_shadowval, new_shadowval;
919 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
921 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** First-byte iteration of a multi-byte write check.  Same logic as
 *  raceCheckWrite, but returns the shadow pointer and exports the old/new
 *  shadow-word values so raceCheckWrite{16,32,64} can bulk-update
 *  neighboring bytes with identical shadow words. */
926 static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
926 uint64_t *shadow = lookupAddressEntry(location);
927 uint64_t shadowval = *shadow;
928 ClockVector *currClock = get_execution()->get_cv(thread);
929 if (currClock == NULL)
932 struct DataRace * race = NULL;
// Expanded-record path.
934 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
935 race = fullRaceCheckWrite(thread, location, shadow, currClock);
940 int threadid = id_to_int(thread);
941 modelclock_t ourClock = currClock->getClock(thread);
943 /* Thread ID is too large or clock is too large. */
944 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
945 expandRecord(shadow);
946 race = fullRaceCheckWrite(thread, location, shadow, currClock);
951 /* Check for datarace against last read. */
952 modelclock_t readClock = READVECTOR(shadowval);
953 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
955 if (clock_may_race(currClock, thread, readClock, readThread)) {
956 /* We have a datarace */
957 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
963 /* Check for datarace against last write. */
964 modelclock_t writeClock = WRITEVECTOR(shadowval);
965 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
967 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
968 /* We have a datarace */
969 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Install this write as the new short record.
975 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// Export the pre-update word so callers can match neighboring bytes.
977 *old_val = shadowval;
// Backtrace capture + deduplicated reporting.
983 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
984 if (raceset->add(race))
986 else model_free(race);
/** Per-byte write check for the remaining bytes of a multi-byte access;
 *  same logic as raceCheckWrite_firstIt but returns nothing. */
992 static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
993 uint64_t *shadow = lookupAddressEntry(location);
995 uint64_t shadowval = *shadow;
997 ClockVector *currClock = get_execution()->get_cv(thread);
998 if (currClock == NULL)
1001 struct DataRace * race = NULL;
1002 /* Do full record */
1003 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1004 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1009 int threadid = id_to_int(thread);
1010 modelclock_t ourClock = currClock->getClock(thread);
1012 /* Thread ID is too large or clock is too large. */
1013 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1014 expandRecord(shadow);
1015 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1020 /* Check for datarace against last read. */
1021 modelclock_t readClock = READVECTOR(shadowval);
1022 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1024 if (clock_may_race(currClock, thread, readClock, readThread)) {
1025 /* We have a datarace */
1026 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1032 /* Check for datarace against last write. */
1033 modelclock_t writeClock = WRITEVECTOR(shadowval);
1034 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1036 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1037 /* We have a datarace */
1038 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
1044 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// Backtrace capture + deduplicated reporting.
1049 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1050 if (raceset->add(race))
1052 else model_free(race);
/** Race-checks an 8-byte write.  Fully checks the first byte; when the
 *  access stays inside one boundary (CHECKBOUNDARY — presumably all bytes
 *  share a shadow base-table row, TODO confirm), neighboring bytes whose
 *  shadow word equals the first byte's pre-update word are bulk-updated;
 *  otherwise each remaining byte gets its own full check. */
1056 void raceCheckWrite64(thread_id_t thread, const void *location)
1058 uint64_t old_shadowval, new_shadowval;
1059 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1061 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1062 if (CHECKBOUNDARY(location, 7)) {
1063 if (shadow[1]==old_shadowval)
1064 shadow[1] = new_shadowval;
1066 if (shadow[2]==old_shadowval)
1067 shadow[2] = new_shadowval;
1069 if (shadow[3]==old_shadowval)
1070 shadow[3] = new_shadowval;
1072 if (shadow[4]==old_shadowval)
1073 shadow[4] = new_shadowval;
1075 if (shadow[5]==old_shadowval)
1076 shadow[5] = new_shadowval;
1078 if (shadow[6]==old_shadowval)
1079 shadow[6] = new_shadowval;
1081 if (shadow[7]==old_shadowval)
1082 shadow[7] = new_shadowval;
// Slow path: per-byte checks for bytes 1..7.
1088 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1090 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1092 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1094 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1096 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1098 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1100 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** Race-checks a 4-byte write; same fast/slow-path scheme as raceCheckWrite64. */
1103 void raceCheckWrite32(thread_id_t thread, const void *location)
1105 uint64_t old_shadowval, new_shadowval;
1106 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1108 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1109 if (CHECKBOUNDARY(location, 3)) {
1110 if (shadow[1]==old_shadowval)
1111 shadow[1] = new_shadowval;
1113 if (shadow[2]==old_shadowval)
1114 shadow[2] = new_shadowval;
1116 if (shadow[3]==old_shadowval)
1117 shadow[3] = new_shadowval;
// Slow path: per-byte checks for bytes 1..3.
1123 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1125 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1127 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** Race-checks a 2-byte write; same fast/slow-path scheme. */
1130 void raceCheckWrite16(thread_id_t thread, const void *location)
1132 uint64_t old_shadowval, new_shadowval;
1133 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1135 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1136 if (CHECKBOUNDARY(location, 1)) {
1137 if (shadow[1]==old_shadowval) {
1138 shadow[1] = new_shadowval;
1142 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Race-checks a 1-byte write (single iteration, no neighbors). */
1145 void raceCheckWrite8(thread_id_t thread, const void *location)
1147 uint64_t old_shadowval, new_shadowval;
1148 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1150 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);