3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the multi-level shadow table mapping application addresses to
 * race-detection metadata (one uint64_t shadow word per application byte,
 * see lookupAddressEntry). */
static struct ShadowTable *root;
/* Bump-allocator arena from which table_calloc() carves shadow base
 * tables; [memory_base, memory_top) is preallocated and zeroed. */
static void *memory_base;
static void *memory_top;
/* Set of distinct races reported so far (deduplicated by backtrace;
 * see race_hash/race_equals). */
static RaceSet * raceset;

/* Counters of plain (non-atomic) accesses by width; printed by
 * print_normal_accesses(). */
static unsigned int store8_count = 0;
static unsigned int store16_count = 0;
static unsigned int store32_count = 0;
static unsigned int store64_count = 0;

static unsigned int load8_count = 0;
static unsigned int load16_count = 0;
static unsigned int load32_count = 0;
static unsigned int load64_count = 0;
31 static const ModelExecution * get_execution()
33 return model->get_execution();
36 /** This function initialized the data race detector. */
37 void initRaceDetector()
39 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
40 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
41 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
42 raceset = new RaceSet();
45 void * table_calloc(size_t size)
47 if ((((char *)memory_base) + size) > memory_top) {
48 return snapshot_calloc(size, 1);
50 void *tmp = memory_base;
51 memory_base = ((char *)memory_base) + size;
56 /** This function looks up the entry in the shadow table corresponding to a
58 static uint64_t * lookupAddressEntry(const void *address)
60 struct ShadowTable *currtable = root;
62 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
63 if (currtable == NULL) {
64 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
68 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
69 if (basetable == NULL) {
70 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
72 return &basetable->array[((uintptr_t)address) & MASK16BIT];
76 bool hasNonAtomicStore(const void *address) {
77 uint64_t * shadow = lookupAddressEntry(address);
78 uint64_t shadowval = *shadow;
79 if (ISSHORTRECORD(shadowval)) {
80 //Do we have a non atomic write with a non-zero clock
81 return !(ATOMICMASK & shadowval);
85 struct RaceRecord *record = (struct RaceRecord *)shadowval;
86 return !record->isAtomic;
90 void setAtomicStoreFlag(const void *address) {
91 uint64_t * shadow = lookupAddressEntry(address);
92 uint64_t shadowval = *shadow;
93 if (ISSHORTRECORD(shadowval)) {
94 *shadow = shadowval | ATOMICMASK;
97 *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
100 struct RaceRecord *record = (struct RaceRecord *)shadowval;
101 record->isAtomic = 1;
105 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
106 uint64_t * shadow = lookupAddressEntry(address);
107 uint64_t shadowval = *shadow;
108 if (ISSHORTRECORD(shadowval) || shadowval == 0) {
109 //Do we have a non atomic write with a non-zero clock
110 *thread = WRTHREADID(shadowval);
111 *clock = WRITEVECTOR(shadowval);
113 struct RaceRecord *record = (struct RaceRecord *)shadowval;
114 *thread = record->writeThread;
115 *clock = record->writeClock;
120 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
121 * to check the potential for a data race.
122 * @param clock1 The current clock vector
123 * @param tid1 The current thread; paired with clock1
124 * @param clock2 The clock value for the potentially-racing action
125 * @param tid2 The thread ID for the potentially-racing action
126 * @return true if the current clock allows a race with the event at clock2/tid2
128 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
129 modelclock_t clock2, thread_id_t tid2)
131 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
135 * Expands a record from the compact form to the full form. This is
136 * necessary for multiple readers or for very large thread ids or time
138 static void expandRecord(uint64_t *shadow)
140 uint64_t shadowval = *shadow;
142 modelclock_t readClock = READVECTOR(shadowval);
143 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
144 modelclock_t writeClock = WRITEVECTOR(shadowval);
145 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
147 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
148 record->writeThread = writeThread;
149 record->writeClock = writeClock;
151 if (readClock != 0) {
152 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
153 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
154 record->numReads = 1;
155 ASSERT(readThread >= 0);
156 record->thread[0] = readThread;
157 record->readClock[0] = readClock;
159 record->thread = NULL;
161 if (shadowval & ATOMICMASK)
162 record->isAtomic = 1;
163 *shadow = (uint64_t) record;
166 #define FIRST_STACK_FRAME 2
168 unsigned int race_hash(struct DataRace *race) {
169 unsigned int hash = 0;
170 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
171 hash ^= ((uintptr_t)race->backtrace[i]);
172 hash = (hash >> 3) | (hash << 29);
177 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
178 if (r1->numframes != r2->numframes)
180 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
181 if (r1->backtrace[i] != r2->backtrace[i])
187 /** This function is called when we detect a data race.*/
188 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
190 #ifdef REPORT_DATA_RACES
191 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
192 race->oldthread = oldthread;
193 race->oldclock = oldclock;
194 race->isoldwrite = isoldwrite;
195 race->newaction = newaction;
196 race->isnewwrite = isnewwrite;
197 race->address = address;
205 * @brief Assert a data race
207 * Asserts a data race which is currently realized, causing the execution to
208 * end and stashing a message in the model-checker's bug list
210 * @param race The race to report
212 void assert_race(struct DataRace *race)
214 model_print("Race detected at location: \n");
215 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
216 model_print("\nData race detected @ address %p:\n"
217 " Access 1: %5s in thread %2d @ clock %3u\n"
218 " Access 2: %5s in thread %2d @ clock %3u\n\n",
220 race->isoldwrite ? "write" : "read",
221 id_to_int(race->oldthread),
223 race->isnewwrite ? "write" : "read",
224 id_to_int(race->newaction->get_tid()),
225 race->newaction->get_seq_number()
/** This function does race detection for a write on an expanded record:
 * checks the new write against every recorded read and the last write,
 * then makes this write the record's most recent access.
 * NOTE(review): this extract appears truncated -- closing braces and the
 * early-exit glue between the checks are missing; statements kept as-is. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */
	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so the zero test
		 * inside clock_may_race could be optimized away. */
		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* This (non-atomic) write subsumes all prior accesses: drop the read
	 * vector and record ourselves as the last writer. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
/** This function does race detection on a write: fast path decodes the
 * compact shadow word in place; slow path expands to a RaceRecord.
 * NOTE(review): this extract appears truncated -- early returns, goto
 * labels and closing braces are missing; statements kept as-is. */
void raceCheckWrite(thread_id_t thread, void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;
	/* Already-expanded record: do the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Install this write as the last access (non-atomic: no ATOMICMASK). */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);

	/* If a race was found, capture its backtrace and report it only the
	 * first time this backtrace is seen (raceset deduplicates). */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** This function does race detection for an ATOMIC write on an expanded
 * record. An atomic write cannot race with a prior atomic access, so the
 * checks are skipped when the record is already flagged atomic.
 * NOTE(review): extract appears truncated -- early-exit glue and closing
 * braces are missing; statements kept as-is. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Prior access was atomic too: no race possible, skip the checks. */
	if (record->isAtomic)

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so the zero test
		 * inside clock_may_race could be optimized away. */
		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* This atomic write becomes the last access (isAtomic = 1, unlike
	 * fullRaceCheckWrite). */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
/** This function does race detection on an ATOMIC write. Mirrors
 * raceCheckWrite but skips racing against a previous atomic access and
 * sets ATOMICMASK when installing the new compact shadow word.
 * NOTE(review): extract appears truncated -- early returns, goto labels
 * and closing braces are missing; statements kept as-is. */
void atomraceCheckWrite(thread_id_t thread, void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;
	/* Already-expanded record: do the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);

	/* Can't race with atomic */
	if (shadowval & ATOMICMASK)

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Install this atomic write as the last access. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
457 /** This function does race detection for a write on an expanded record. */
458 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
459 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
460 record->numReads = 0;
461 record->writeThread = thread;
462 modelclock_t ourClock = currClock->getClock(thread);
463 record->writeClock = ourClock;
464 record->isAtomic = 1;
467 /** This function does race detection for a write on an expanded record. */
468 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
469 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
470 record->numReads = 0;
471 record->writeThread = thread;
472 modelclock_t ourClock = currClock->getClock(thread);
473 record->writeClock = ourClock;
474 record->isAtomic = 0;
477 /** This function just updates metadata on atomic write. */
478 void recordWrite(thread_id_t thread, void *location) {
479 uint64_t *shadow = lookupAddressEntry(location);
480 uint64_t shadowval = *shadow;
481 ClockVector *currClock = get_execution()->get_cv(thread);
483 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
484 fullRecordWrite(thread, location, shadow, currClock);
488 int threadid = id_to_int(thread);
489 modelclock_t ourClock = currClock->getClock(thread);
491 /* Thread ID is too large or clock is too large. */
492 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
493 expandRecord(shadow);
494 fullRecordWrite(thread, location, shadow, currClock);
498 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
501 /** This function just updates metadata on atomic write. */
502 void recordCalloc(void *location, size_t size) {
503 thread_id_t thread = thread_current()->get_id();
504 for(;size != 0;size--) {
505 uint64_t *shadow = lookupAddressEntry(location);
506 uint64_t shadowval = *shadow;
507 ClockVector *currClock = get_execution()->get_cv(thread);
509 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
510 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
514 int threadid = id_to_int(thread);
515 modelclock_t ourClock = currClock->getClock(thread);
517 /* Thread ID is too large or clock is too large. */
518 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
519 expandRecord(shadow);
520 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
524 *shadow = ENCODEOP(0, 0, threadid, ourClock);
525 location = (void *)(((char *) location) + 1);
/** This function does race detection on a read for an expanded record:
 * checks against the last write, compacts the read vector by dropping
 * reads this read subsumes, grows the vector if needed, and appends the
 * current read.
 * NOTE(review): extract appears truncated -- the declaration of
 * `copytoindex`, early-exit glue and closing braces are missing;
 * statements kept as-is. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Shorten vector when possible: keep only reads that may still race;
	 * compacted entries are shifted down to `copytoindex`. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that is not really a datarace check as reads cannot
		   actually race. It is just determining that this read subsumes
		   another in the sense that either this read races or neither
		   read races. Note that readClock can't actually be zero, so it
		   could be optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];

	/* popcount(copytoindex) <= 1 means copytoindex is 0 or a power of two,
	 * i.e. the vector is exactly at a capacity boundary and the arrays may
	 * need to be (re)allocated before appending. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0 && record->thread == NULL) {
			/* First read on this record: allocate the initial arrays. */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Double the capacity and copy the surviving reads over. */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;

	/* Append the current read to the (compacted) vector. */
	modelclock_t ourClock = currClock->getClock(thread);

	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
/** This function does race detection on a read: fast path decodes the
 * compact shadow word; expands to a full record when the thread id/clock
 * overflow the encoding or when the previous read must be kept.
 * NOTE(review): extract appears truncated -- early returns, goto labels
 * and closing braces are missing; statements kept as-is. */
void raceCheckRead(thread_id_t thread, const void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already-expanded record: do the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* The compact form holds only one read; if the previous read may still
	 * race independently of ours, it cannot be dropped. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		fullRaceCheckRead(thread, location, shadow, currClock);

	/* Record our read in the compact form, preserving the last write and
	 * the atomic flag. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
664 /** This function does race detection on a read for an expanded record. */
665 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
667 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
668 struct DataRace * race = NULL;
669 /* Check for datarace against last write. */
670 if (record->isAtomic)
673 modelclock_t writeClock = record->writeClock;
674 thread_id_t writeThread = record->writeThread;
676 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
677 /* We have a datarace */
678 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/** This function does race detection on an ATOMIC read: only a prior
 * non-atomic write can race with it; the shadow word is not updated.
 * NOTE(review): extract appears truncated -- early returns and closing
 * braces are missing; statements kept as-is. */
void atomraceCheckRead(thread_id_t thread, const void *location)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already-expanded record: do the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);

	/* Last access was atomic: cannot race with an atomic read. */
	if (shadowval & ATOMICMASK)

	/* Check for datarace against last write. */

	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** First-byte pass of a multi-byte read check: performs full race
 * detection on the first byte and reports, via @a old_val / @a new_val,
 * the compact shadow-word transition so the caller can fast-path the
 * remaining bytes that held the same old value. Presumably returns the
 * shadow pointer for the first byte -- return statements are missing from
 * this extract.
 * NOTE(review): extract appears truncated -- early returns, goto labels
 * and closing braces are missing; statements kept as-is. */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Already-expanded record: do the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		struct RaceRecord *record = (struct RaceRecord *) (*shadow);
		/* Slot 0 holds the read carried over by expandRecord; this read
		 * goes into slot 1. (numReads update not visible in extract.) */
		record->thread[1] = thread;
		record->readClock[1] = ourClock;

	/* Record our read compactly and expose the old->new transition for
	 * the caller's fast path over the remaining bytes. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

	*old_val = shadowval;

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Slow-path per-byte read check for the non-first bytes of a multi-byte
 * read; same logic as raceCheckRead_firstIt but without exporting the
 * shadow transition.
 * NOTE(review): extract appears truncated -- early returns and closing
 * braces are missing; statements kept as-is. */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckRead(thread, location, shadow, currClock);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);

	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We don't subsume this read... Have to expand record. */
		expandRecord(shadow);
		struct RaceRecord *record = (struct RaceRecord *) (*shadow);
		/* Slot 0 holds the read carried over by expandRecord; this read
		 * goes into slot 1. (numReads update not visible in extract.) */
		record->thread[1] = thread;
		record->readClock[1] = ourClock;

	/* Record our read compactly, preserving last write and atomic flag. */
	*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Race-check a 64-bit (8-byte) read: run full detection on the first
 * byte, then cheaply propagate the resulting shadow transition to the
 * remaining 7 bytes when each still holds the same old shadow word;
 * any byte that does not match falls back to the per-byte slow path.
 * CHECKBOUNDARY(location, 7) presumably verifies all 8 shadow words live
 * in the same leaf table -- confirm in datarace.h.
 * NOTE(review): extract appears truncated -- else/goto glue and closing
 * braces are missing; statements kept as-is. */
void raceCheckRead64(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 7)) {
		/* Fast path: identical old value means identical race outcome. */
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;

		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;

		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;

		if (shadow[4]==old_shadowval)
			shadow[4] = new_shadowval;

		if (shadow[5]==old_shadowval)
			shadow[5] = new_shadowval;

		if (shadow[6]==old_shadowval)
			shadow[6] = new_shadowval;

		if (shadow[7]==old_shadowval)
			shadow[7] = new_shadowval;

	/* Slow path: full per-byte checks for the remaining bytes. */
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** Race-check a 32-bit (4-byte) read; same fast-path/slow-path scheme as
 * raceCheckRead64 but over 4 bytes.
 * NOTE(review): extract appears truncated -- else/goto glue and closing
 * braces are missing; statements kept as-is. */
void raceCheckRead32(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 3)) {
		/* Fast path: identical old value means identical race outcome. */
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;

		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;

		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;

	/* Slow path: full per-byte checks for the remaining bytes. */
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));

	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** Race-check a 16-bit (2-byte) read; same fast-path/slow-path scheme as
 * raceCheckRead64 but over 2 bytes.
 * NOTE(review): extract appears truncated -- else/goto glue and closing
 * braces are missing; statements kept as-is. */
void raceCheckRead16(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 1)) {
		/* Fast path: identical old value means identical race outcome. */
		if (shadow[1]==old_shadowval) {
			shadow[1] = new_shadowval;

	/* Slow path: full check for the second byte. */
	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Race-check an 8-bit (1-byte) read: a single byte needs only the
 * first-iteration check; the old/new shadow values are unused here. */
void raceCheckRead8(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/** First-byte pass of a multi-byte write check: performs full race
 * detection on the first byte and reports, via @a old_val / @a new_val,
 * the compact shadow-word transition so the caller can fast-path the
 * remaining bytes. Presumably returns the shadow pointer for the first
 * byte -- return statements are missing from this extract.
 * NOTE(review): extract appears truncated -- early returns, goto labels
 * and closing braces are missing; statements kept as-is. */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;
	/* Already-expanded record: do the full check. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Install the write compactly and expose the old->new transition for
	 * the caller's fast path over the remaining bytes. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);

	*old_val = shadowval;

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Slow-path per-byte write check for the non-first bytes of a multi-byte
 * write; same logic as raceCheckWrite_firstIt but without exporting the
 * shadow transition.
 * NOTE(review): extract appears truncated -- early returns and closing
 * braces are missing; statements kept as-is. */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	/* Thread not yet initialized with a clock vector: nothing to check. */
	if (currClock == NULL)

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	int threadid = id_to_int(thread);
	modelclock_t ourClock = currClock->getClock(thread);

	/* Thread ID is too large or clock is too large. */
	if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
		expandRecord(shadow);
		race = fullRaceCheckWrite(thread, location, shadow, currClock);

	/* Check for datarace against last read. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

	if (clock_may_race(currClock, thread, readClock, readThread)) {
		/* We have a datarace */
		race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);

	/* Check for datarace against last write. */
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);

	/* Install this non-atomic write as the last access. */
	*shadow = ENCODEOP(0, 0, threadid, ourClock);

	/* Capture and deduplicate any race found. */
#ifdef REPORT_DATA_RACES
	race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
	if (raceset->add(race))
	else model_free(race);
/** Race-check a 64-bit (8-byte) write: run full detection on the first
 * byte, then cheaply propagate the resulting shadow transition to the
 * remaining 7 bytes when each still holds the same old shadow word;
 * any byte that does not match falls back to the per-byte slow path.
 * NOTE(review): extract appears truncated -- else/goto glue and closing
 * braces are missing; statements kept as-is. */
void raceCheckWrite64(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 7)) {
		/* Fast path: identical old value means identical race outcome. */
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;

		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;

		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;

		if (shadow[4]==old_shadowval)
			shadow[4] = new_shadowval;

		if (shadow[5]==old_shadowval)
			shadow[5] = new_shadowval;

		if (shadow[6]==old_shadowval)
			shadow[6] = new_shadowval;

		if (shadow[7]==old_shadowval)
			shadow[7] = new_shadowval;

	/* Slow path: full per-byte checks for the remaining bytes. */
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
/** Race-check a 32-bit (4-byte) write; same fast-path/slow-path scheme as
 * raceCheckWrite64 but over 4 bytes.
 * NOTE(review): extract appears truncated -- else/goto glue and closing
 * braces are missing; statements kept as-is. */
void raceCheckWrite32(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 3)) {
		/* Fast path: identical old value means identical race outcome. */
		if (shadow[1]==old_shadowval)
			shadow[1] = new_shadowval;

		if (shadow[2]==old_shadowval)
			shadow[2] = new_shadowval;

		if (shadow[3]==old_shadowval)
			shadow[3] = new_shadowval;

	/* Slow path: full per-byte checks for the remaining bytes. */
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));

	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
/** Race-check a 16-bit (2-byte) write; same fast-path/slow-path scheme as
 * raceCheckWrite64 but over 2 bytes.
 * NOTE(review): extract appears truncated -- else/goto glue and closing
 * braces are missing; statements kept as-is. */
void raceCheckWrite16(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
	if (CHECKBOUNDARY(location, 1)) {
		/* Fast path: identical old value means identical race outcome. */
		if (shadow[1]==old_shadowval) {
			shadow[1] = new_shadowval;

	/* Slow path: full check for the second byte. */
	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
/** Race-check an 8-bit (1-byte) write: a single byte needs only the
 * first-iteration check; the old/new shadow values are unused here. */
void raceCheckWrite8(thread_id_t thread, const void *location)
	uint64_t old_shadowval, new_shadowval;
	old_shadowval = new_shadowval = INVALIDSHADOWVAL;

	raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1218 void print_normal_accesses()
1220 model_print("store 8 count: %u\n", store8_count);
1221 model_print("store 16 count: %u\n", store16_count);
1222 model_print("store 32 count: %u\n", store32_count);
1223 model_print("store 64 count: %u\n", store64_count);
1225 model_print("load 8 count: %u\n", load8_count);
1226 model_print("load 16 count: %u\n", load16_count);
1227 model_print("load 32 count: %u\n", load32_count);
1228 model_print("load 64 count: %u\n", load64_count);