3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the two-level shadow-table trie mapping addresses to shadow words. */
14 static struct ShadowTable *root;
/* Bump-allocator pool for shadow tables; see table_calloc(). memory_base is
 * the next free byte, memory_top is one past the end of the pool. */
15 static void *memory_base;
16 static void *memory_top;
/* Set of already-reported races, used to suppress duplicate reports. */
17 static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
/** This function looks up the entry in the shadow table corresponding to a
 *  given address: a two-level trie indexed by 16-bit slices of the address,
 *  with levels allocated lazily on first touch. */
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
/* NOTE(review): a line is elided here in this listing (plausibly a
 * 64-bit-address preprocessor guard around the next level) — confirm
 * against the repository; code below is kept exactly as extracted. */
/* Level 1: address bits 47..32 select (and lazily create) a subtable. */
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
/* Level 2: address bits 31..16 select (and lazily create) the base table. */
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
/* Low 16 bits select the per-byte 64-bit shadow word. */
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
/** Returns true when the shadow entry for @a address records a plain
 *  (non-atomic) last store, in either the compact or expanded encoding. */
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
/* Compact encoding: ATOMICMASK bit marks the last write as atomic. */
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return !(ATOMICMASK & shadowval);
/* NOTE(review): lines elided here (presumably the `else` branch header for
 * the expanded-record case) — code kept exactly as extracted. */
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic;
/** Marks the last store recorded for @a address as atomic, in whichever
 *  shadow encoding is current. */
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
/* Compact record: set the atomic bit in place. */
82 *shadow = shadowval | ATOMICMASK;
/* Empty shadow word: install an atomic record with no thread/clock info. */
85 *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
/* Expanded record path. NOTE(review): the update of this record (expected to
 * set its atomic flag) is elided in this listing — confirm against repo. */
88 struct RaceRecord *record = (struct RaceRecord *)shadowval;
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94 uint64_t * shadow = lookupAddressEntry(address);
95 uint64_t shadowval = *shadow;
96 if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97 //Do we have a non atomic write with a non-zero clock
98 *thread = WRTHREADID(shadowval);
99 *clock = WRITEVECTOR(shadowval);
101 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102 *thread = record->writeThread;
103 *clock = record->writeClock;
108 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109 * to check the potential for a data race.
110 * @param clock1 The current clock vector
111 * @param tid1 The current thread; paired with clock1
112 * @param clock2 The clock value for the potentially-racing action
113 * @param tid2 The thread ID for the potentially-racing action
114 * @return true if the current clock allows a race with the event at clock2/tid2
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117 modelclock_t clock2, thread_id_t tid2)
119 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
123 * Expands a record from the compact form to the full form. This is
124 * necessary for multiple readers or for very large thread ids or time
126 static void expandRecord(uint64_t *shadow)
128 uint64_t shadowval = *shadow;
130 modelclock_t readClock = READVECTOR(shadowval);
131 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132 modelclock_t writeClock = WRITEVECTOR(shadowval);
133 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
135 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136 record->writeThread = writeThread;
137 record->writeClock = writeClock;
139 if (readClock != 0) {
140 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142 record->numReads = 1;
143 ASSERT(readThread >= 0);
144 record->thread[0] = readThread;
145 record->readClock[0] = readClock;
147 if (shadowval & ATOMICMASK)
148 record->isAtomic = 1;
149 *shadow = (uint64_t) record;
152 #define FIRST_STACK_FRAME 2
154 unsigned int race_hash(struct DataRace *race) {
155 unsigned int hash = 0;
156 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157 hash ^= ((uintptr_t)race->backtrace[i]);
158 hash = (hash >> 3) | (hash << 29);
164 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
165 if (r1->numframes != r2->numframes)
167 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
168 if (r1->backtrace[i] != r2->backtrace[i])
174 /** This function is called when we detect a data race.*/
175 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
177 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
178 race->oldthread = oldthread;
179 race->oldclock = oldclock;
180 race->isoldwrite = isoldwrite;
181 race->newaction = newaction;
182 race->isnewwrite = isnewwrite;
183 race->address = address;
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
195 void assert_race(struct DataRace *race)
197 model_print("Race detected at location: \n");
/* Dump the saved stack frames of the racing access to the model output fd. */
198 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
/* NOTE(review): some printf arguments (e.g. the %p address and the first
 * clock value) are elided in this listing — code kept exactly as extracted. */
199 model_print("\nData race detected @ address %p:\n"
200 "    Access 1: %5s in thread %2d @ clock %3u\n"
201 "    Access 2: %5s in thread %2d @ clock %3u\n\n",
203 race->isoldwrite ? "write" : "read",
204 id_to_int(race->oldthread),
206 race->isnewwrite ? "write" : "read",
207 id_to_int(race->newaction->get_tid()),
208 race->newaction->get_seq_number()
212 /** This function does race detection for a write on an expanded record. */
213 struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
215 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
216 struct DataRace * race = NULL;
218 /* Check for datarace against last read. */
220 for (int i = 0;i < record->numReads;i++) {
221 modelclock_t readClock = record->readClock[i];
222 thread_id_t readThread = record->thread[i];
/* Note that readClock can't actually be zero here, so it could be optimized. */
227 if (clock_may_race(currClock, thread, readClock, readThread)) {
228 /* We have a datarace */
229 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* NOTE(review): control-flow lines after each report (a jump skipping the
 * remaining checks) are elided in this listing — code kept as extracted. */
234 /* Check for datarace against last write. */
237 modelclock_t writeClock = record->writeClock;
238 thread_id_t writeThread = record->writeThread;
240 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
241 /* We have a datarace */
242 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Record this write: drop all readers, stamp our thread/clock, non-atomic. */
247 record->numReads = 0;
248 record->writeThread = thread;
249 record->isAtomic = 0;
250 modelclock_t ourClock = currClock->getClock(thread);
251 record->writeClock = ourClock;
255 /** This function does race detection on a write. */
256 void raceCheckWrite(thread_id_t thread, void *location)
258 uint64_t *shadow = lookupAddressEntry(location);
259 uint64_t shadowval = *shadow;
/* No clock vector yet for this thread => nothing meaningful to check. */
260 ClockVector *currClock = get_execution()->get_cv(thread);
261 if (currClock == NULL)
264 struct DataRace * race = NULL;
/* Expanded record: delegate the whole check to fullRaceCheckWrite. */
266 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
267 race = fullRaceCheckWrite(thread, location, shadow, currClock);
272 int threadid = id_to_int(thread);
273 modelclock_t ourClock = currClock->getClock(thread);
275 /* Thread ID is too large or clock is too large. */
276 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
277 expandRecord(shadow);
278 race = fullRaceCheckWrite(thread, location, shadow, currClock);
285 /* Check for datarace against last read. */
286 modelclock_t readClock = READVECTOR(shadowval);
287 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
289 if (clock_may_race(currClock, thread, readClock, readThread)) {
290 /* We have a datarace */
291 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
297 /* Check for datarace against last write. */
299 modelclock_t writeClock = WRITEVECTOR(shadowval);
300 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
302 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
303 /* We have a datarace */
304 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Re-encode compact record: no reader, this writer@clock, atomic bit clear. */
310 *shadow = ENCODEOP(0, 0, threadid, ourClock)
/* Report only races not already in raceset; duplicates are freed.
 * NOTE(review): early-return/label lines are elided in this listing —
 * code kept exactly as extracted. */
315 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
316 if (raceset->add(race))
318 else model_free(race);
/** This function does race detection for an ATOMIC write on an expanded
 *  record; an already-atomic record cannot race and is skipped. */
324 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
326 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
327 struct DataRace * race = NULL;
/* Atomic-vs-atomic never races; NOTE(review): the branch body here is
 * elided in this listing (expected to skip the race checks). */
329 if (record->isAtomic)
332 /* Check for datarace against last read. */
334 for (int i = 0;i < record->numReads;i++) {
335 modelclock_t readClock = record->readClock[i];
336 thread_id_t readThread = record->thread[i];
/* Note that readClock can't actually be zero here, so it could be optimized. */
341 if (clock_may_race(currClock, thread, readClock, readThread)) {
342 /* We have a datarace */
343 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
348 /* Check for datarace against last write. */
351 modelclock_t writeClock = record->writeClock;
352 thread_id_t writeThread = record->writeThread;
354 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
355 /* We have a datarace */
356 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Record this atomic write: drop readers, stamp thread/clock, mark atomic. */
361 record->numReads = 0;
362 record->writeThread = thread;
363 record->isAtomic = 1;
364 modelclock_t ourClock = currClock->getClock(thread);
365 record->writeClock = ourClock;
/** This function does race detection on an ATOMIC write. */
370 void atomraceCheckWrite(thread_id_t thread, void *location)
372 uint64_t *shadow = lookupAddressEntry(location);
373 uint64_t shadowval = *shadow;
374 ClockVector *currClock = get_execution()->get_cv(thread);
375 if (currClock == NULL)
378 struct DataRace * race = NULL;
/* Expanded record: delegate to the atomic full check. */
380 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
381 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
386 int threadid = id_to_int(thread);
387 modelclock_t ourClock = currClock->getClock(thread);
389 /* Thread ID is too large or clock is too large. */
390 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
391 expandRecord(shadow);
392 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
396 /* Can't race with atomic */
397 if (shadowval & ATOMICMASK)
401 /* Check for datarace against last read. */
403 modelclock_t readClock = READVECTOR(shadowval);
404 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
406 if (clock_may_race(currClock, thread, readClock, readThread)) {
407 /* We have a datarace */
408 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
414 /* Check for datarace against last write. */
416 modelclock_t writeClock = WRITEVECTOR(shadowval);
417 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
419 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
420 /* We have a datarace */
421 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Compact re-encode with the atomic bit set. */
427 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/* De-duplicate and report. NOTE(review): jump/label lines are elided in
 * this listing — code kept exactly as extracted. */
432 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
433 if (raceset->add(race))
435 else model_free(race);
439 /** This function does race detection for a write on an expanded record. */
440 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
441 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
442 record->numReads = 0;
443 record->writeThread = thread;
444 modelclock_t ourClock = currClock->getClock(thread);
445 record->writeClock = ourClock;
446 record->isAtomic = 1;
449 /** This function does race detection for a write on an expanded record. */
450 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
451 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
452 record->numReads = 0;
453 record->writeThread = thread;
454 modelclock_t ourClock = currClock->getClock(thread);
455 record->writeClock = ourClock;
456 record->isAtomic = 0;
459 /** This function just updates metadata on atomic write. */
460 void recordWrite(thread_id_t thread, void *location) {
461 uint64_t *shadow = lookupAddressEntry(location);
462 uint64_t shadowval = *shadow;
463 ClockVector *currClock = get_execution()->get_cv(thread);
/* Expanded record: update in place. NOTE(review): an early return is
 * presumably elided after this call in the listing — confirm. */
465 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
466 fullRecordWrite(thread, location, shadow, currClock);
470 int threadid = id_to_int(thread);
471 modelclock_t ourClock = currClock->getClock(thread);
473 /* Thread ID is too large or clock is too large. */
474 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
475 expandRecord(shadow);
476 fullRecordWrite(thread, location, shadow, currClock);
/* Compact path: "no reader, this writer@clock", atomic bit set. */
480 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/** Records a zero-initializing (calloc-style) write of @a size bytes starting
 *  at @a location: each byte's shadow entry is stamped with the current
 *  thread/clock as a non-atomic write, with no race checking. */
484 void recordCalloc(void *location, size_t size) {
485 thread_id_t thread = thread_current()->get_id();
/* One iteration per byte of the allocation. */
486 for(;size != 0;size--) {
487 uint64_t *shadow = lookupAddressEntry(location);
488 uint64_t shadowval = *shadow;
489 ClockVector *currClock = get_execution()->get_cv(thread);
/* Expanded record: update in place. NOTE(review): loop-advance lines are
 * elided after this call in the listing — code kept as extracted. */
491 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
492 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
496 int threadid = id_to_int(thread);
497 modelclock_t ourClock = currClock->getClock(thread);
499 /* Thread ID is too large or clock is too large. */
500 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
501 expandRecord(shadow);
502 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
/* Compact encode (atomic bit clear), then advance to the next byte. */
506 *shadow = ENCODEOP(0, 0, threadid, ourClock);
507 location = (void *)(((char *) location) + 1);
513 /** This function does race detection on a read for an expanded record. */
514 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
516 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
517 struct DataRace * race = NULL;
518 /* Check for datarace against last write. */
520 modelclock_t writeClock = record->writeClock;
521 thread_id_t writeThread = record->writeThread;
523 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
524 /* We have a datarace */
525 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
528 /* Shorten vector when possible */
/* NOTE(review): the declaration/initialization of `copytoindex` (the
 * compaction cursor) and its increments inside the loop are elided in this
 * listing — code kept exactly as extracted. */
532 for (int i = 0;i < record->numReads;i++) {
533 modelclock_t readClock = record->readClock[i];
534 thread_id_t readThread = record->thread[i];
536 /* Note that is not really a datarace check as reads cannot
537 actually race. It is just determining that this read subsumes
538 another in the sense that either this read races or neither
539 read races. Note that readClock can't actually be zero, so it
540 could be optimized. */
542 if (clock_may_race(currClock, thread, readClock, readThread)) {
543 /* Still need this read in vector */
544 if (copytoindex != i) {
545 ASSERT(record->thread[i] >= 0);
546 record->readClock[copytoindex] = record->readClock[i];
547 record->thread[copytoindex] = record->thread[i];
/* popcount <= 1 means copytoindex is 0 or a power of two: capacity changes
 * are only considered at those sizes. */
553 if (__builtin_popcount(copytoindex) <= 1) {
554 if (copytoindex == 0) {
555 int newCapacity = INITCAPACITY;
556 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
557 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
558 } else if (copytoindex>=INITCAPACITY) {
/* Grow both parallel arrays by doubling, preserving surviving entries. */
559 int newCapacity = copytoindex * 2;
560 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
561 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
562 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
563 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
564 snapshot_free(record->readClock);
565 snapshot_free(record->thread);
566 record->readClock = newreadClock;
567 record->thread = newthread;
/* Append the current read after the surviving (non-subsumed) reads. */
571 modelclock_t ourClock = currClock->getClock(thread);
574 record->thread[copytoindex] = thread;
575 record->readClock[copytoindex] = ourClock;
576 record->numReads = copytoindex + 1;
580 /** This function does race detection on a read. */
581 void raceCheckRead(thread_id_t thread, const void *location)
583 uint64_t *shadow = lookupAddressEntry(location);
584 uint64_t shadowval = *shadow;
585 ClockVector *currClock = get_execution()->get_cv(thread);
586 if (currClock == NULL)
589 struct DataRace * race = NULL;
/* Expanded record: delegate to the full read check. */
592 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
593 race = fullRaceCheckRead(thread, location, shadow, currClock);
598 int threadid = id_to_int(thread);
599 modelclock_t ourClock = currClock->getClock(thread);
601 /* Thread ID is too large or clock is too large. */
602 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
603 expandRecord(shadow);
604 race = fullRaceCheckRead(thread, location, shadow, currClock);
608 /* Check for datarace against last write. */
610 modelclock_t writeClock = WRITEVECTOR(shadowval);
611 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
613 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
614 /* We have a datarace */
615 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/* If the previously-recorded read could still race, this read does not
 * subsume it — keep both by expanding the record. */
621 modelclock_t readClock = READVECTOR(shadowval);
622 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
624 if (clock_may_race(currClock, thread, readClock, readThread)) {
625 /* We don't subsume this read... Have to expand record. */
626 expandRecord(shadow);
627 fullRaceCheckRead(thread, location, shadow, currClock);
/* Compact re-encode: this read replaces the old one; writer bits preserved. */
632 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
/* De-duplicate and report. NOTE(review): jump/label lines are elided in
 * this listing — code kept exactly as extracted. */
636 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
637 if (raceset->add(race))
639 else model_free(race);
644 /** This function does race detection on a read for an expanded record. */
645 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
647 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
648 struct DataRace * race = NULL;
649 /* Check for datarace against last write. */
/* Atomic last write cannot race with an atomic read. NOTE(review): the
 * branch body is elided in this listing (expected to skip the check). */
650 if (record->isAtomic)
653 modelclock_t writeClock = record->writeClock;
654 thread_id_t writeThread = record->writeThread;
656 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
657 /* We have a datarace */
658 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
663 /** This function does race detection on a read. */
664 void atomraceCheckRead(thread_id_t thread, const void *location)
666 uint64_t *shadow = lookupAddressEntry(location);
667 uint64_t shadowval = *shadow;
668 ClockVector *currClock = get_execution()->get_cv(thread);
669 if (currClock == NULL)
672 struct DataRace * race = NULL;
/* Expanded record: delegate to the atomic full read check. */
675 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
676 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
/* An atomic last write cannot race with this atomic read. */
680 if (shadowval & ATOMICMASK)
684 /* Check for datarace against last write. */
686 modelclock_t writeClock = WRITEVECTOR(shadowval);
687 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
689 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
690 /* We have a datarace */
691 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/* De-duplicate and report. NOTE(review): jump/label lines are elided in
 * this listing — code kept exactly as extracted. */
699 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
700 if (raceset->add(race))
702 else model_free(race);
/** Read check for the FIRST byte of a multi-byte access. Besides the normal
 *  per-byte race check, it reports via @a old_val/@a new_val the compact
 *  shadow encoding before/after the update and returns the shadow pointer,
 *  so callers (raceCheckRead16/32/64) can fast-path neighboring bytes whose
 *  shadow word matches old_val. */
706 static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
708 uint64_t *shadow = lookupAddressEntry(location);
709 uint64_t shadowval = *shadow;
711 ClockVector *currClock = get_execution()->get_cv(thread);
712 if (currClock == NULL)
715 struct DataRace * race = NULL;
/* Expanded record: delegate to the full read check. */
718 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
719 race = fullRaceCheckRead(thread, location, shadow, currClock);
724 int threadid = id_to_int(thread);
725 modelclock_t ourClock = currClock->getClock(thread);
727 /* Thread ID is too large or clock is too large. */
728 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
729 expandRecord(shadow);
730 race = fullRaceCheckRead(thread, location, shadow, currClock);
734 /* Check for datarace against last write. */
736 modelclock_t writeClock = WRITEVECTOR(shadowval);
737 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
739 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
740 /* We have a datarace */
741 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
744 modelclock_t readClock = READVECTOR(shadowval);
745 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
747 if (clock_may_race(currClock, thread, readClock, readThread)) {
748 /* We don't subsume this read... Have to expand record. */
749 expandRecord(shadow);
/* Manually install this read as the second entry of the expanded record.
 * NOTE(review): the numReads update is elided in this listing — confirm. */
750 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
751 record->thread[1] = thread;
752 record->readClock[1] = ourClock;
/* Compact re-encode, preserving writer bits and atomic flag; export the
 * old/new encodings for the caller's neighbor fast path. */
758 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
760 *old_val = shadowval;
/* De-duplicate and report. NOTE(review): jump/label and return lines are
 * elided in this listing — code kept exactly as extracted. */
765 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
766 if (raceset->add(race))
768 else model_free(race);
/** Read check for a NON-first byte of a multi-byte access; identical to the
 *  per-byte read check but without exporting old/new shadow encodings. */
774 static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
775 uint64_t *shadow = lookupAddressEntry(location);
777 uint64_t shadowval = *shadow;
779 ClockVector *currClock = get_execution()->get_cv(thread);
780 if (currClock == NULL)
783 struct DataRace * race = NULL;
/* Expanded record: delegate to the full read check. */
786 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
787 race = fullRaceCheckRead(thread, location, shadow, currClock);
792 int threadid = id_to_int(thread);
793 modelclock_t ourClock = currClock->getClock(thread);
795 /* Thread ID is too large or clock is too large. */
796 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
797 expandRecord(shadow);
798 race = fullRaceCheckRead(thread, location, shadow, currClock);
802 /* Check for datarace against last write. */
803 modelclock_t writeClock = WRITEVECTOR(shadowval);
804 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
806 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
807 /* We have a datarace */
808 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
811 modelclock_t readClock = READVECTOR(shadowval);
812 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
814 if (clock_may_race(currClock, thread, readClock, readThread)) {
815 /* We don't subsume this read... Have to expand record. */
816 expandRecord(shadow);
/* NOTE(review): the numReads update for this manual insert is elided in
 * this listing — confirm against the repository. */
817 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
818 record->thread[1] = thread;
819 record->readClock[1] = ourClock;
/* Compact re-encode, preserving writer bits and atomic flag. */
825 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
/* De-duplicate and report. NOTE(review): jump/label lines are elided in
 * this listing — code kept exactly as extracted. */
829 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
830 if (raceset->add(race))
832 else model_free(race);
/** 64-bit read instrumentation: full check on the first byte; if all 8 bytes
 *  share a base table, neighbor slots holding the same old encoding are
 *  bulk-updated, otherwise each remaining byte gets an individual check. */
838 void raceCheckRead64(thread_id_t thread, const void *location)
840 uint64_t old_shadowval, new_shadowval;
841 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
844 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* All 8 bytes within one base table => adjacent shadow slots addressable. */
845 if (CHECKBOUNDARY(location, 7)) {
846 if (shadow[1]==old_shadowval)
847 shadow[1] = new_shadowval;
849 if (shadow[2]==old_shadowval)
850 shadow[2] = new_shadowval;
852 if (shadow[3]==old_shadowval)
853 shadow[3] = new_shadowval;
855 if (shadow[4]==old_shadowval)
856 shadow[4] = new_shadowval;
858 if (shadow[5]==old_shadowval)
859 shadow[5] = new_shadowval;
861 if (shadow[6]==old_shadowval)
862 shadow[6] = new_shadowval;
864 if (shadow[7]==old_shadowval)
865 shadow[7] = new_shadowval;
/* Slow path: per-byte checks. NOTE(review): the branch structure joining the
 * fast and slow paths is elided in this listing — code kept as extracted. */
871 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
873 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
875 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
877 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
879 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
881 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
883 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** 32-bit read instrumentation; see raceCheckRead64 for the fast/slow-path
 *  structure (here over 4 bytes). */
886 void raceCheckRead32(thread_id_t thread, const void *location)
888 uint64_t old_shadowval, new_shadowval;
889 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
892 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* All 4 bytes within one base table => neighbor fast path. */
893 if (CHECKBOUNDARY(location, 3)) {
894 if (shadow[1]==old_shadowval)
895 shadow[1] = new_shadowval;
897 if (shadow[2]==old_shadowval)
898 shadow[2] = new_shadowval;
900 if (shadow[3]==old_shadowval)
901 shadow[3] = new_shadowval;
/* Slow path: per-byte checks. NOTE(review): joining branch lines elided. */
907 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
909 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
911 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** 16-bit read instrumentation; see raceCheckRead64 for the fast/slow-path
 *  structure (here over 2 bytes). */
914 void raceCheckRead16(thread_id_t thread, const void *location)
916 uint64_t old_shadowval, new_shadowval;
917 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
920 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* Both bytes within one base table => neighbor fast path. */
921 if (CHECKBOUNDARY(location, 1)) {
922 if (shadow[1]==old_shadowval) {
923 shadow[1] = new_shadowval;
/* Slow path: check the second byte individually. */
927 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
930 void raceCheckRead8(thread_id_t thread, const void *location)
932 uint64_t old_shadowval, new_shadowval;
933 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
936 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** Write check for the FIRST byte of a multi-byte access; exports the
 *  compact shadow encoding before/after the update via @a old_val/@a new_val
 *  and returns the shadow pointer for the caller's neighbor fast path. */
939 static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
941 uint64_t *shadow = lookupAddressEntry(location);
942 uint64_t shadowval = *shadow;
943 ClockVector *currClock = get_execution()->get_cv(thread);
944 if (currClock == NULL)
947 struct DataRace * race = NULL;
/* Expanded record: delegate to the full write check. */
949 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
950 race = fullRaceCheckWrite(thread, location, shadow, currClock);
955 int threadid = id_to_int(thread);
956 modelclock_t ourClock = currClock->getClock(thread);
958 /* Thread ID is too large or clock is too large. */
959 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
960 expandRecord(shadow);
961 race = fullRaceCheckWrite(thread, location, shadow, currClock);
966 /* Check for datarace against last read. */
967 modelclock_t readClock = READVECTOR(shadowval);
968 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
970 if (clock_may_race(currClock, thread, readClock, readThread)) {
971 /* We have a datarace */
972 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
978 /* Check for datarace against last write. */
979 modelclock_t writeClock = WRITEVECTOR(shadowval);
980 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
982 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
983 /* We have a datarace */
984 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Compact re-encode: no reader, this writer@clock; export old encoding. */
990 *shadow = ENCODEOP(0, 0, threadid, ourClock);
992 *old_val = shadowval;
/* De-duplicate and report. NOTE(review): jump/label and return lines are
 * elided in this listing — code kept exactly as extracted. */
998 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
999 if (raceset->add(race))
1001 else model_free(race);
/** Write check for a NON-first byte of a multi-byte access; identical to the
 *  per-byte write check but without exporting old/new shadow encodings. */
1007 static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
1008 uint64_t *shadow = lookupAddressEntry(location);
1010 uint64_t shadowval = *shadow;
1012 ClockVector *currClock = get_execution()->get_cv(thread);
1013 if (currClock == NULL)
1016 struct DataRace * race = NULL;
1017 /* Do full record */
1018 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1019 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1024 int threadid = id_to_int(thread);
1025 modelclock_t ourClock = currClock->getClock(thread);
1027 /* Thread ID is too large or clock is too large. */
1028 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1029 expandRecord(shadow);
1030 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1035 /* Check for datarace against last read. */
1036 modelclock_t readClock = READVECTOR(shadowval);
1037 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1039 if (clock_may_race(currClock, thread, readClock, readThread)) {
1040 /* We have a datarace */
1041 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1047 /* Check for datarace against last write. */
1048 modelclock_t writeClock = WRITEVECTOR(shadowval);
1049 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1051 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1052 /* We have a datarace */
1053 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Compact re-encode: no reader, this writer@clock. */
1059 *shadow = ENCODEOP(0, 0, threadid, ourClock);
/* De-duplicate and report. NOTE(review): jump/label lines are elided in
 * this listing — code kept exactly as extracted. */
1064 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1065 if (raceset->add(race))
1067 else model_free(race);
/** 64-bit write instrumentation: full check on the first byte; if all 8
 *  bytes share a base table, neighbor slots holding the same old encoding
 *  are bulk-updated, otherwise each remaining byte is checked individually. */
1071 void raceCheckWrite64(thread_id_t thread, const void *location)
1073 uint64_t old_shadowval, new_shadowval;
1074 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1077 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* All 8 bytes within one base table => adjacent shadow slots addressable. */
1078 if (CHECKBOUNDARY(location, 7)) {
1079 if (shadow[1]==old_shadowval)
1080 shadow[1] = new_shadowval;
1082 if (shadow[2]==old_shadowval)
1083 shadow[2] = new_shadowval;
1085 if (shadow[3]==old_shadowval)
1086 shadow[3] = new_shadowval;
1088 if (shadow[4]==old_shadowval)
1089 shadow[4] = new_shadowval;
1091 if (shadow[5]==old_shadowval)
1092 shadow[5] = new_shadowval;
1094 if (shadow[6]==old_shadowval)
1095 shadow[6] = new_shadowval;
1097 if (shadow[7]==old_shadowval)
1098 shadow[7] = new_shadowval;
/* Slow path: per-byte checks. NOTE(review): the branch structure joining the
 * fast and slow paths is elided in this listing — code kept as extracted. */
1104 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1106 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1108 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1110 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1112 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1114 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1116 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** 32-bit write instrumentation; see raceCheckWrite64 for the fast/slow-path
 *  structure (here over 4 bytes). */
1119 void raceCheckWrite32(thread_id_t thread, const void *location)
1121 uint64_t old_shadowval, new_shadowval;
1122 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1125 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* All 4 bytes within one base table => neighbor fast path. */
1126 if (CHECKBOUNDARY(location, 3)) {
1127 if (shadow[1]==old_shadowval)
1128 shadow[1] = new_shadowval;
1130 if (shadow[2]==old_shadowval)
1131 shadow[2] = new_shadowval;
1133 if (shadow[3]==old_shadowval)
1134 shadow[3] = new_shadowval;
/* Slow path: per-byte checks. NOTE(review): joining branch lines elided. */
1140 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1142 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1144 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** 16-bit write instrumentation; see raceCheckWrite64 for the fast/slow-path
 *  structure (here over 2 bytes). */
1147 void raceCheckWrite16(thread_id_t thread, const void *location)
1149 uint64_t old_shadowval, new_shadowval;
1150 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1153 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* Both bytes within one base table => neighbor fast path. */
1154 if (CHECKBOUNDARY(location, 1)) {
1155 if (shadow[1]==old_shadowval) {
1156 shadow[1] = new_shadowval;
/* Slow path: check the second byte individually. */
1160 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Byte-sized write instrumentation: a single shadow-slot check; the old/new
 *  encodings are discarded. (Function tail lies beyond this view.) */
1163 void raceCheckWrite8(thread_id_t thread, const void *location)
1165 uint64_t old_shadowval, new_shadowval;
1166 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1169 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);