3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the two-level shadow-memory lookup structure. */
static struct ShadowTable *root;
/* Bump-allocation region for shadow base tables; [memory_base, memory_top). */
static void *memory_base;
static void *memory_top;
/* Set of distinct races reported so far (deduplicated via race_hash/race_equals). */
static RaceSet * raceset;

/* Counters of instrumented plain stores, by access width in bits. */
static unsigned int store8_count = 0;
static unsigned int store16_count = 0;
static unsigned int store32_count = 0;
static unsigned int store64_count = 0;

/* Counters of instrumented plain loads, by access width in bits. */
static unsigned int load8_count = 0;
static unsigned int load16_count = 0;
static unsigned int load32_count = 0;
static unsigned int load64_count = 0;
/** @return the current ModelExecution of the global model checker. */
static const ModelExecution * get_execution()
	return model->get_execution();
/** This function initializes the data race detector. */
void initRaceDetector()
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	/* Pre-reserve a zeroed bump region for SHADOWBASETABLES base tables. */
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	raceset = new RaceSet();
/** Zeroed allocation for shadow tables: bump-allocates from the region
 *  reserved by initRaceDetector(), falling back to snapshot_calloc once
 *  that region is exhausted.  The bump region is pre-zeroed, so no memset
 *  is needed on the fast path. */
void * table_calloc(size_t size)
	if ((((char *)memory_base) + size) > memory_top) {
		return snapshot_calloc(size, 1);
	void *tmp = memory_base;
	memory_base = ((char *)memory_base) + size;
/** This function looks up the entry in the shadow table corresponding to a
 *  given address, lazily allocating intermediate tables.  The walk is keyed
 *  on address bits [47:32], then [31:16]; the final 64-bit shadow word is
 *  indexed by bits [15:0].
static uint64_t * lookupAddressEntry(const void *address)
	struct ShadowTable *currtable = root;
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
/** Returns true iff the last store recorded at 'address' was non-atomic. */
bool hasNonAtomicStore(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		//Do we have a non atomic write with a non-zero clock
		return !(ATOMICMASK & shadowval);
	/* Expanded record: consult its explicit isAtomic flag. */
	struct RaceRecord *record = (struct RaceRecord *)shadowval;
	return !record->isAtomic;
/** Marks the last store recorded at 'address' as atomic in its shadow entry. */
void setAtomicStoreFlag(const void *address) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		*shadow = shadowval | ATOMICMASK;
	/* NOTE(review): branch headers between these statements are not visible
	   here — presumably this is the empty-entry case installing a fresh
	   short record with only the atomic bit set; confirm against full file. */
	*shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
	/* Expanded record: set the explicit flag. */
	struct RaceRecord *record = (struct RaceRecord *)shadowval;
/** Retrieves the thread id and clock of the last write recorded at
 *  'address', handling both the packed short record and the expanded
 *  RaceRecord forms. */
void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
	uint64_t * shadow = lookupAddressEntry(address);
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval) || shadowval == 0) {
		//Do we have a non atomic write with a non-zero clock
		*thread = WRTHREADID(shadowval);
		*clock = WRITEVECTOR(shadowval);
	struct RaceRecord *record = (struct RaceRecord *)shadowval;
	*thread = record->writeThread;
	*clock = record->writeClock;
 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
 * to check the potential for a data race.
 * @param clock1 The current clock vector
 * @param tid1 The current thread; paired with clock1
 * @param clock2 The clock value for the potentially-racing action
 * @param tid2 The thread ID for the potentially-racing action
 * @return true if the current clock allows a race with the event at clock2/tid2
static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
													 modelclock_t clock2, thread_id_t tid2)
	/* Race requires distinct threads, a real (non-zero) prior clock, and the
	 * prior access not already ordered before us by clock1. */
	return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or clock
 * values that no longer fit in the packed 64-bit encoding.
static void expandRecord(uint64_t *shadow)
	uint64_t shadowval = *shadow;
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;
	if (readClock != 0) {
		/* Carry the single packed reader over into the expanded read vectors. */
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
		record->thread = NULL;
	/* Preserve the atomic flag from the packed encoding. */
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	*shadow = (uint64_t) record;
/* Frames below this index are skipped when hashing/comparing backtraces —
   presumably the race detector's own frames; TODO confirm against full file. */
#define FIRST_STACK_FRAME 2

/** Hash of a race's captured call stack (rotate-xor over frame addresses,
 *  ignoring the first FIRST_STACK_FRAME frames). */
unsigned int race_hash(struct DataRace *race) {
	unsigned int hash = 0;
	for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
		hash ^= ((uintptr_t)race->backtrace[i]);
		hash = (hash >> 3) | (hash << 29);
/** Structural equality of two races' backtraces, ignoring the first
 *  FIRST_STACK_FRAME frames (must pair with race_hash). */
bool race_equals(struct DataRace *r1, struct DataRace *r2) {
	if (r1->numframes != r2->numframes)
	for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
		if (r1->backtrace[i] != r2->backtrace[i])
/** This function is called when we detect a data race.  Allocates and fills
 *  a DataRace descriptor for the old/new access pair (only when
 *  REPORT_DATA_RACES is defined; the backtrace is filled in by callers). */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
#ifdef REPORT_DATA_RACES
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
void assert_race(struct DataRace *race)
	model_print("Race detected at location: \n");
	/* Symbolize and print the captured stack of the racing access. */
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	model_print("\nData race detected @ address %p:\n"
		" Access 1: %5s in thread %2d @ clock %3u\n"
		" Access 2: %5s in thread %2d @ clock %3u\n\n",
		race->isoldwrite ? "write" : "read",
		id_to_int(race->oldthread),
		race->isnewwrite ? "write" : "read",
		id_to_int(race->newaction->get_tid()),
		race->newaction->get_seq_number()
/** This function does race detection for a write on an expanded record.
 *  Checks the write against all recorded readers and the last writer, then
 *  records this (non-atomic) write as the new last write. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last read. */
	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];
		/* Note that readClock can't actually be zero here, so it could be
		   optimized. */
		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
	/* Check for datarace against last write. */
	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
	/* Become the last write: clear readers and drop the atomic flag. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
/** This function does race detection on a plain (non-atomic) write. */
void raceCheckWrite(thread_id_t thread, void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
	/* Install a fresh short record for this non-atomic write. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** This function does race detection for an atomic write on an expanded
 *  record.  If the previous access was atomic there is no race to check. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;
	/* Atomic-vs-atomic accesses never race. */
	if (record->isAtomic)
	/* Check for datarace against last read. */
	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];
		/* Note that readClock can't actually be zero here, so it could be
		   optimized. */
		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
	/* Check for datarace against last write. */
	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
	/* Become the last write and mark it atomic. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
/** This function does race detection on an atomic write. */
void atomraceCheckWrite(thread_id_t thread, void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full atomic check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
	/* Can't race with atomic */
	if (shadowval & ATOMICMASK)
	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
	/* Install a fresh short record carrying the atomic flag. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Records an atomic write into an expanded record — metadata update only,
 *  no race checks are performed here. */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 1;
/** Records a non-atomic write into an expanded record — metadata update
 *  only, no race checks are performed here. */
void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 0;
/** This function just updates metadata on atomic write. */
void recordWrite(thread_id_t thread, void *location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Expanded record: update it in place. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		fullRecordWrite(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		fullRecordWrite(thread, location, shadow, currClock);
	/* Short-record fast path: encode the atomic write directly. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/** Records a zero-initializing (calloc-style) write of 'size' bytes starting
 *  at 'location' as non-atomic writes by the current thread — metadata only,
 *  no race checks. */
void recordCalloc(void *location, size_t size) {
	thread_id_t thread = thread_current()->get_id();
	for(;size != 0;size--) {
		uint64_t *shadow = lookupAddressEntry(location);
		uint64_t shadowval = *shadow;
		ClockVector *currClock = get_execution()->get_cv(thread);
		/* Expanded record: update it in place. */
		if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);
		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
		location = (void *)(((char *) location) + 1);
/** This function does race detection on a read for an expanded record.
 *  Checks against the last write, compacts the reader vector by dropping
 *  reads subsumed by this one, grows it when needed, and appends this read. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */
	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	/* Shorten vector when possible */
	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];
		/* Note that is not really a datarace check as reads cannot
		   actually race. It is just determining that this read subsumes
		   another in the sense that either this read races or neither
		   read races. Note that readClock can't actually be zero, so it
		   could be optimized. */
		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
	/* popcount(copytoindex) <= 1 means copytoindex is 0 or a power of two —
	   presumably the capacity-boundary points where (re)allocation may be
	   needed; TODO confirm growth policy against full file. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0 && record->thread == NULL) {
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Double the reader arrays, copy, and release the old storage. */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
	/* Append this read to the (possibly compacted) vector. */
	modelclock_t ourClock = currClock->getClock(thread);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
/** This function does race detection on a plain (non-atomic) read. */
void raceCheckRead(thread_id_t thread, const void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	/* Can this read subsume the previously recorded read? */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		fullRaceCheckRead(thread, location, shadow, currClock);
	/* Re-pack: our read replaces the old one; keep last write + atomic bit. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** This function does race detection on an atomic read for an expanded
 *  record: only the last write can race, and not if it was itself atomic. */
struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */
	if (record->isAtomic)
	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/** This function does race detection on an atomic read. */
void atomraceCheckRead(thread_id_t thread, const void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full atomic check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
	/* Last access was atomic: atomic-vs-atomic never races. */
	if (shadowval & ATOMICMASK)
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** First-byte pass of the multi-byte read checks (raceCheckRead8/16/32/64):
 *  race-checks one byte and reports the old and new shadow values through
 *  the out-params so the caller can fast-path neighboring bytes whose
 *  shadow words match.  Returns the shadow entry for 'location'. */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		struct RaceRecord *record = (struct RaceRecord *) (*shadow);
		record->thread[1] = thread;
		record->readClock[1] = ourClock;
	/* Re-pack our read; report old/new values for the caller's fast path. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	*old_val = shadowval;
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Per-byte slow-path helper for the multi-byte read checks: same logic as
 *  raceCheckRead_firstIt but without the old/new fast-path out-params. */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		struct RaceRecord *record = (struct RaceRecord *) (*shadow);
		record->thread[1] = thread;
		record->readClock[1] = ourClock;
	/* Re-pack: our read replaces the old one; keep last write + atomic bit. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Race-checks an 8-byte read.  Fully checks the first byte; when the access
 *  stays within a shadow-table boundary, the remaining seven shadow words
 *  are updated in place if they equal the first byte's old shadow value,
 *  otherwise each byte falls back to the per-byte slow path. */
void raceCheckRead64(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 7)) {
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;
		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;
		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;
		if (shadow[4]==old_shadowval)
			shadow[4] = new_shadowval;
		if (shadow[5]==old_shadowval)
			shadow[5] = new_shadowval;
		if (shadow[6]==old_shadowval)
			shadow[6] = new_shadowval;
		if (shadow[7]==old_shadowval)
			shadow[7] = new_shadowval;
	/* Slow path: check each remaining byte individually. */
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** Race-checks a 4-byte read (same fast/slow-path scheme as raceCheckRead64). */
void raceCheckRead32(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 3)) {
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;
		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;
		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;
	/* Slow path: check each remaining byte individually. */
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** Race-checks a 2-byte read (same fast/slow-path scheme as raceCheckRead64). */
void raceCheckRead16(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 1)) {
		if (shadow[1]==old_shadowval) {
			shadow[1] = new_shadowval;
	/* Slow path: check the second byte individually. */
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Race-checks a single-byte read. */
void raceCheckRead8(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** First-byte pass of the multi-byte write checks (raceCheckWrite8/16/32/64):
 *  race-checks one byte and reports the old and new shadow values through
 *  the out-params so the caller can fast-path neighboring bytes whose
 *  shadow words match.  Returns the shadow entry for 'location'. */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Already-expanded record: delegate to the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
	/* Install the new short record; report old/new for the caller's fast path. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);
	*old_val = shadowval;
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Per-byte slow-path helper for the multi-byte write checks: same logic as
 *  raceCheckWrite_firstIt but without the old/new fast-path out-params. */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);
	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
	/* Install the new short record for this non-atomic write. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);
#ifdef REPORT_DATA_RACES
	/* Deduplicate by backtrace; free the descriptor if already reported. */
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Race-checks an 8-byte write.  Fully checks the first byte; when the access
 *  stays within a shadow-table boundary, the remaining seven shadow words
 *  are updated in place if they equal the first byte's old shadow value,
 *  otherwise each byte falls back to the per-byte slow path. */
void raceCheckWrite64(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 7)) {
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;
		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;
		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;
		if (shadow[4]==old_shadowval)
			shadow[4] = new_shadowval;
		if (shadow[5]==old_shadowval)
			shadow[5] = new_shadowval;
		if (shadow[6]==old_shadowval)
			shadow[6] = new_shadowval;
		if (shadow[7]==old_shadowval)
			shadow[7] = new_shadowval;
	/* Slow path: check each remaining byte individually. */
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** Race-checks a 4-byte write (same fast/slow-path scheme as raceCheckWrite64). */
void raceCheckWrite32(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 3)) {
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;
		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;
		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;
	/* Slow path: check each remaining byte individually. */
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** Race-checks a 2-byte write (same fast/slow-path scheme as raceCheckWrite64). */
void raceCheckWrite16(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 1)) {
		if (shadow[1]==old_shadowval) {
			shadow[1] = new_shadowval;
	/* Slow path: check the second byte individually. */
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Race-checks a single-byte write. */
void raceCheckWrite8(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
	raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** Prints the per-width counters of instrumented plain loads and stores. */
void print_normal_accesses()
	model_print("store 8  count: %u\n", store8_count);
	model_print("store 16 count: %u\n", store16_count);
	model_print("store 32 count: %u\n", store32_count);
	model_print("store 64 count: %u\n", store64_count);
	model_print("load 8  count: %u\n", load8_count);
	model_print("load 16 count: %u\n", load16_count);
	model_print("load 32 count: %u\n", load32_count);
	model_print("load 64 count: %u\n", load64_count);