3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
// Root of the multi-level shadow table mapping application addresses to
// per-byte race metadata (see lookupAddressEntry).
static struct ShadowTable *root;
// Bump-pointer pool (pre-zeroed) backing shadow-table allocations.
static void *memory_base;
static void *memory_top;
// Deduplication set of data races already reported (keyed by backtrace;
// see race_hash / race_equals).
static RaceSet * raceset;
// Statistics: counts of plain (non-atomic) stores by access width.
// NOTE(review): the counters are only printed in this chunk; presumably
// incremented elsewhere — confirm against the full file.
static unsigned int store8_count = 0;
static unsigned int store16_count = 0;
static unsigned int store32_count = 0;
static unsigned int store64_count = 0;
// Statistics: counts of plain (non-atomic) loads by access width.
static unsigned int load8_count = 0;
static unsigned int load16_count = 0;
static unsigned int load32_count = 0;
static unsigned int load64_count = 0;
/** @return The current ModelExecution of the global model checker. */
static const ModelExecution * get_execution()
return model->get_execution();
/** This function initializes the data race detector: allocates the
 *  shadow-table root, the bump pool for base tables, and the set used to
 *  deduplicate reported races. */
void initRaceDetector()
root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
raceset = new RaceSet();
/** Zeroed allocation for shadow tables: serves from the pre-zeroed bump
 *  pool while space remains, otherwise falls back to snapshot_calloc.
 *  NOTE(review): the visible text ends after advancing memory_base; the
 *  pool path presumably returns `tmp` — confirm against full source. */
void * table_calloc(size_t size)
if ((((char *)memory_base) + size) > memory_top) {
// Pool exhausted: allocate directly from the snapshotting allocator.
return snapshot_calloc(size, 1);
// Pool path: hand out the current bump pointer and advance it.
void *tmp = memory_base;
memory_base = ((char *)memory_base) + size;
/** This function looks up the entry in the shadow table corresponding to a
 *  given address, creating intermediate tables on demand.
 *  The address is consumed in 16-bit slices: bits 32-47 index `root`,
 *  bits 16-31 the second-level table, bits 0-15 the base table.
 *  NOTE(review): only 48 bits of the address are consumed — assumes
 *  canonical (48-bit) user addresses; confirm for the target platform. */
static inline uint64_t * lookupAddressEntry(const void *address)
struct ShadowTable *currtable = root;
currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
if (currtable == NULL) {
// First touch of this 4GB region: allocate the second-level table.
currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
if (basetable == NULL) {
// First touch of this 64KB page: allocate its base table.
basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
return &basetable->array[((uintptr_t)address) & MASK16BIT];
/** @return true if the last store recorded for `address` was non-atomic
 *  (ATOMICMASK clear in the compact encoding, or isAtomic clear on the
 *  expanded record). */
bool hasNonAtomicStore(const void *address) {
uint64_t * shadow = lookupAddressEntry(address);
uint64_t shadowval = *shadow;
if (ISSHORTRECORD(shadowval)) {
//Do we have a non atomic write with a non-zero clock
return !(ATOMICMASK & shadowval);
// Expanded record: consult the isAtomic flag directly.
struct RaceRecord *record = (struct RaceRecord *)shadowval;
return !record->isAtomic;
/** Marks the last store at `address` as atomic in its shadow entry. */
void setAtomicStoreFlag(const void *address) {
uint64_t * shadow = lookupAddressEntry(address);
uint64_t shadowval = *shadow;
if (ISSHORTRECORD(shadowval)) {
*shadow = shadowval | ATOMICMASK;
// NOTE(review): the next line appears to belong to a separate branch
// (likely the empty-entry, shadowval == 0 case) whose guard was dropped
// from this view — confirm against the full source.
*shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
// Expanded record: set the flag on the RaceRecord itself.
struct RaceRecord *record = (struct RaceRecord *)shadowval;
record->isAtomic = 1;
/** Retrieves the thread id and clock of the last store recorded at
 *  `address`, decoding either the compact word or the expanded record.
 *  @param thread Out-parameter receiving the writing thread's id
 *  @param clock Out-parameter receiving the write's clock value */
void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
uint64_t * shadow = lookupAddressEntry(address);
uint64_t shadowval = *shadow;
if (ISSHORTRECORD(shadowval) || shadowval == 0) {
// Compact encoding (or empty entry, which decodes to thread 0/clock 0).
*thread = WRTHREADID(shadowval);
*clock = WRITEVECTOR(shadowval);
// Expanded record path.
struct RaceRecord *record = (struct RaceRecord *)shadowval;
*thread = record->writeThread;
*clock = record->writeClock;
/**
 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
 * to check the potential for a data race.
 * @param clock1 The current clock vector
 * @param tid1 The current thread; paired with clock1
 * @param clock2 The clock value for the potentially-racing action
 * @param tid2 The thread ID for the potentially-racing action
 * @return true if the current clock allows a race with the event at clock2/tid2
 */
static inline bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
modelclock_t clock2, thread_id_t tid2)
// Race possible iff: different threads, the other access exists
// (clock2 != 0), and it is not happens-before us (our view of tid2's
// clock does not exceed clock2).
return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
/**
 * Expands a record from the compact (single shadow word) form to the full
 * RaceRecord form. This is necessary for multiple readers or for very
 * large thread ids or clock values that don't fit the packed encoding.
 */
static void expandRecord(uint64_t *shadow)
uint64_t shadowval = *shadow;
// Decode the packed reader/writer thread ids and clocks.
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
record->writeThread = writeThread;
record->writeClock = writeClock;
if (readClock != 0) {
// Carry the single packed reader over into the reader arrays.
record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
record->numReads = 1;
ASSERT(readThread >= 0);
record->thread[0] = readThread;
record->readClock[0] = readClock;
// NOTE(review): an `else` appears truncated here; this line presumably
// initializes the no-reader case — confirm against the full source.
record->thread = NULL;
// Preserve the atomic flag from the compact encoding.
if (shadowval & ATOMICMASK)
record->isAtomic = 1;
// Install the expanded record; ISSHORTRECORD distinguishes it later.
*shadow = (uint64_t) record;
// Leading backtrace frames belong to the detector itself; skip them when
// hashing/comparing races so dedup keys on the application's stack only.
#define FIRST_STACK_FRAME 2
/** Hash of a race's backtrace: rotate-xor over the frame addresses. */
unsigned int race_hash(struct DataRace *race) {
unsigned int hash = 0;
for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
hash ^= ((uintptr_t)race->backtrace[i]);
hash = (hash >> 3) | (hash << 29);	// rotate right by 3 bits
/** Two races are equal iff their backtraces match frame-for-frame,
 *  ignoring the detector's own leading frames. */
bool race_equals(struct DataRace *r1, struct DataRace *r2) {
if (r1->numframes != r2->numframes)
for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
if (r1->backtrace[i] != r2->backtrace[i])
/** This function is called when we detect a data race. Allocates and
 *  populates a DataRace descriptor for the pair of conflicting accesses.
 *  Compiled out (returns nothing useful) unless REPORT_DATA_RACES is set.
 *  @param oldthread/oldclock The previously-recorded access
 *  @param isoldwrite Whether the old access was a write
 *  @param newaction/isnewwrite The current access
 *  @param address The racing memory location */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
#ifdef REPORT_DATA_RACES
struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
race->oldthread = oldthread;
race->oldclock = oldclock;
race->isoldwrite = isoldwrite;
race->newaction = newaction;
race->isnewwrite = isnewwrite;
race->address = address;
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
model_print("Race detected at location: \n");
// Symbolize and print the captured backtrace frames.
backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
model_print("\nData race detected @ address %p:\n"
"    Access 1: %5s in thread %2d @ clock %3u\n"
"    Access 2: %5s in thread %2d @ clock %3u\n\n",
race->isoldwrite ? "write" : "read",
id_to_int(race->oldthread),
race->isnewwrite ? "write" : "read",
id_to_int(race->newaction->get_tid()),
race->newaction->get_seq_number()
/** Race detection for a plain (non-atomic) write on an expanded record:
 *  checks against all recorded readers and the last writer, then records
 *  this write as the new last write. Returns the last race found (NULL if
 *  none). */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
struct DataRace * race = NULL;
/* Check for datarace against last read. */
for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that readClock can't actually be zero here, so the clock2 != 0
   test inside clock_may_race could be optimized away. */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* Check for datarace against last write. */
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// This write supersedes all recorded accesses: clear readers, record
// ourselves as the (non-atomic) last writer.
record->numReads = 0;
record->writeThread = thread;
record->isAtomic = 0;
modelclock_t ourClock = currClock->getClock(thread);
record->writeClock = ourClock;
/** Race detection for an ATOMIC write on an expanded record. If the prior
 *  access was also atomic, nothing can race; otherwise checks readers and
 *  the last writer like fullRaceCheckWrite, then records this write as
 *  atomic. Returns the last race found (NULL if none). */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
struct DataRace * race = NULL;
// Atomic-on-atomic cannot race; skip the checks entirely.
if (record->isAtomic)
/* Check for datarace against last read. */
for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that readClock can't actually be zero here, so the clock2 != 0
   test inside clock_may_race could be optimized away. */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* Check for datarace against last write. */
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Record this atomic write as the new last access.
record->numReads = 0;
record->writeThread = thread;
record->isAtomic = 1;
modelclock_t ourClock = currClock->getClock(thread);
record->writeClock = ourClock;
/** Race detection entry point for an ATOMIC write at `location`. Dispatches
 *  to the expanded-record path when needed, otherwise checks the compact
 *  encoding in place and re-encodes this write with ATOMICMASK set. */
void atomraceCheckWrite(thread_id_t thread, void *location)
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
// No clock vector yet for this thread: nothing to compare against.
if (currClock == NULL)
struct DataRace * race = NULL;
// Expanded-record path.
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
/* Can't race with atomic */
if (shadowval & ATOMICMASK)
/* Check for datarace against last read. */
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Re-encode: this atomic write becomes the last access (readers cleared).
*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
#ifdef REPORT_DATA_RACES
// Capture the backtrace and report the race once per unique stack.
// NOTE(review): the then-branch of raceset->add (likely assert_race) is
// missing from this view — confirm against the full source.
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
/** Records an ATOMIC write on an expanded record WITHOUT any race checks:
 *  clears readers and sets this thread/clock as the last (atomic) write. */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
record->numReads = 0;
record->writeThread = thread;
modelclock_t ourClock = currClock->getClock(thread);
record->writeClock = ourClock;
record->isAtomic = 1;
/** Records a NON-ATOMIC write on an expanded record WITHOUT any race
 *  checks: clears readers, sets this thread/clock as the last write. */
void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
record->numReads = 0;
record->writeThread = thread;
modelclock_t ourClock = currClock->getClock(thread);
record->writeClock = ourClock;
record->isAtomic = 0;
/** This function just updates shadow metadata for an atomic write — no
 *  race detection is performed. Expands the record only when the thread id
 *  or clock overflows the compact encoding. */
void recordWrite(thread_id_t thread, void *location) {
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
// Already-expanded record: update it in place.
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
fullRecordWrite(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
fullRecordWrite(thread, location, shadow, currClock);
// Compact encoding: readers cleared, atomic flag set.
*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/** Records zero-initialization of `size` bytes starting at `location` as
 *  NON-ATOMIC writes by the current thread — metadata update only, no race
 *  detection. One shadow entry is updated per byte. */
void recordCalloc(void *location, size_t size) {
thread_id_t thread = thread_current_id();
for(;size != 0;size--) {
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
// Already-expanded record: update it in place.
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
fullRecordWriteNonAtomic(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
fullRecordWriteNonAtomic(thread, location, shadow, currClock);
// Compact encoding: non-atomic write (no ATOMICMASK).
*shadow = ENCODEOP(0, 0, threadid, ourClock);
// Advance to the next byte of the calloc'd region.
location = (void *)(((char *) location) + 1);
/** Race detection for a plain read on an expanded record: checks against
 *  the last write, compacts the reader list by dropping reads this one
 *  subsumes, grows the reader arrays when full, then appends this read.
 *  Returns the race found (NULL if none). */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
struct RaceRecord *record = (struct RaceRecord *) (*shadow);
struct DataRace * race = NULL;
/* Check for datarace against last write. */
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/* Shorten vector when possible */
for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that is not really a datarace check as reads cannot
   actually race.  It is just determining that this read subsumes
   another in the sense that either this read races or neither
   read races. Note that readClock can't actually be zero, so it
   could be optimized.  */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* Still need this read in vector */
if (copytoindex != i) {
ASSERT(readThread >= 0);
record->readClock[copytoindex] = readClock;
record->thread[copytoindex] = readThread;
// Grow (or first-allocate) reader arrays when at capacity. Capacities
// are powers of two, so popcount(copytoindex) <= 1 detects "exactly at
// a capacity boundary (or empty)".
if (__builtin_popcount(copytoindex) <= 1) {
if (copytoindex == 0 && record->thread == NULL) {
int newCapacity = INITCAPACITY;
record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
} else if (copytoindex>=INITCAPACITY) {
// Double capacity and copy the surviving readers over.
int newCapacity = copytoindex * 2;
thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
real_memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
real_memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
snapshot_free(record->readClock);
snapshot_free(record->thread);
record->readClock = newreadClock;
record->thread = newthread;
// Append this read to the (possibly compacted) reader list.
modelclock_t ourClock = currClock->getClock(thread);
record->thread[copytoindex] = thread;
record->readClock[copytoindex] = ourClock;
record->numReads = copytoindex + 1;
/** Race detection for an ATOMIC read on an expanded record: atomic-on-
 *  atomic cannot race, otherwise only the last write can race with this
 *  read. Returns the race found (NULL if none). */
struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
struct RaceRecord *record = (struct RaceRecord *) (*shadow);
struct DataRace * race = NULL;
/* Check for datarace against last write. */
// If the last write was atomic, this atomic read cannot race with it.
if (record->isAtomic)
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/** Race detection entry point for an ATOMIC read at `location`: dispatches
 *  to the expanded-record path, or checks the compact encoding's last
 *  write. Reports (and dedups) any race found. */
void atomraceCheckRead(thread_id_t thread, const void *location)
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
// No clock vector yet for this thread: nothing to compare against.
if (currClock == NULL)
struct DataRace * race = NULL;
// Expanded-record path.
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = atomfullRaceCheckRead(thread, location, shadow, currClock);
// Atomic-on-atomic cannot race.
if (shadowval & ATOMICMASK)
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
#ifdef REPORT_DATA_RACES
// Capture the backtrace; report once per unique stack, free duplicates.
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
/** Full read race check for the FIRST byte of a multi-byte access.
 *  Exports the old and new compact shadow encodings through
 *  old_val/new_val so the caller can fast-path the remaining bytes when
 *  their shadow words match old_val. Returns the shadow pointer for
 *  `location` (so callers can index shadow[1..7]). */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
if (currClock == NULL)
struct DataRace * race = NULL;
// Expanded-record path (fast path not applicable).
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = fullRaceCheckRead(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = fullRaceCheckRead(thread, location, shadow, currClock);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Does the packed previous read still matter, or does ours subsume it?
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We don't subsume this read... Have to expand record. */
expandRecord(shadow);
struct RaceRecord *record = (struct RaceRecord *) (*shadow);
record->thread[1] = thread;
record->readClock[1] = ourClock;
// Compact re-encode: our read replaces the packed reader; atomic flag
// of the last write is preserved.
*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Publish the old encoding for the caller's fast path.
*old_val = shadowval;
#ifdef REPORT_DATA_RACES
// Capture the backtrace; report once per unique stack, free duplicates.
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
/** Full read race check for one byte of a multi-byte access (slow path,
 *  used when the caller's fast path could not apply). Same logic as
 *  raceCheckRead_firstIt but does not export the old/new encodings. */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location)
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
if (currClock == NULL)
struct DataRace * race = NULL;
// Expanded-record path.
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = fullRaceCheckRead(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = fullRaceCheckRead(thread, location, shadow, currClock);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Does the packed previous read still matter, or does ours subsume it?
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We don't subsume this read... Have to expand record. */
expandRecord(shadow);
struct RaceRecord *record = (struct RaceRecord *) (*shadow);
record->thread[1] = thread;
record->readClock[1] = ourClock;
// Compact re-encode: our read replaces the packed reader; atomic flag
// of the last write is preserved.
*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
#ifdef REPORT_DATA_RACES
// Capture the backtrace; report once per unique stack, free duplicates.
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
/** Race check for an 8-byte read. Checks the first byte fully, then — when
 *  all 8 bytes lie within one shadow base table (CHECKBOUNDARY) — fast-
 *  paths the remaining bytes by patching shadow words that exactly match
 *  the first byte's old encoding. Any byte that doesn't match falls back
 *  to the per-byte slow path. */
void raceCheckRead64(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 7)) {
// Fast path: adjacent shadow words identical to the first byte's old
// encoding get the same new encoding without re-running the checks.
if (shadow[1]==old_shadowval)
shadow[1] = new_shadowval;
if (shadow[2]==old_shadowval)
shadow[2] = new_shadowval;
if (shadow[3]==old_shadowval)
shadow[3] = new_shadowval;
if (shadow[4]==old_shadowval)
shadow[4] = new_shadowval;
if (shadow[5]==old_shadowval)
shadow[5] = new_shadowval;
if (shadow[6]==old_shadowval)
shadow[6] = new_shadowval;
if (shadow[7]==old_shadowval)
shadow[7] = new_shadowval;
RESTORE_MODEL_FLAG(old_flag);
// Slow path: check each remaining byte individually.
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a 4-byte read; same fast/slow-path structure as
 *  raceCheckRead64 but over 4 bytes. */
void raceCheckRead32(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 3)) {
// Fast path: patch matching adjacent shadow words in place.
if (shadow[1]==old_shadowval)
shadow[1] = new_shadowval;
if (shadow[2]==old_shadowval)
shadow[2] = new_shadowval;
if (shadow[3]==old_shadowval)
shadow[3] = new_shadowval;
RESTORE_MODEL_FLAG(old_flag);
// Slow path: check each remaining byte individually.
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a 2-byte read; fast-paths the second byte when its
 *  shadow word matches the first byte's old encoding. */
void raceCheckRead16(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 1)) {
if (shadow[1]==old_shadowval) {
// Fast path: patch the second byte's shadow word and return early.
shadow[1] = new_shadowval;
RESTORE_MODEL_FLAG(old_flag);
// Slow path for the second byte.
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a 1-byte read — no fast path needed. */
void raceCheckRead8(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
raceCheckRead_otherIt(thread, location);
RESTORE_MODEL_FLAG(old_flag);
/** Full write race check for the FIRST byte of a multi-byte access.
 *  Exports the old and new compact shadow encodings through
 *  old_val/new_val so the caller can fast-path the remaining bytes.
 *  Returns the shadow pointer for `location`. */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
if (currClock == NULL)
struct DataRace * race = NULL;
// Expanded-record path (fast path not applicable).
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = fullRaceCheckWrite(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = fullRaceCheckWrite(thread, location, shadow, currClock);
/* Check for datarace against last read. */
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Compact re-encode: this plain write becomes the last access
// (readers cleared, atomic flag cleared).
*shadow = ENCODEOP(0, 0, threadid, ourClock);
// Publish the old encoding for the caller's fast path.
*old_val = shadowval;
#ifdef REPORT_DATA_RACES
// Capture the backtrace; report once per unique stack, free duplicates.
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
/** Full write race check for one byte of a multi-byte access (slow path).
 *  Same logic as raceCheckWrite_firstIt without exporting encodings. */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location)
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
if (currClock == NULL)
struct DataRace * race = NULL;
// Expanded-record path.
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = fullRaceCheckWrite(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = fullRaceCheckWrite(thread, location, shadow, currClock);
/* Check for datarace against last read. */
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Compact re-encode: this plain write becomes the last access.
*shadow = ENCODEOP(0, 0, threadid, ourClock);
#ifdef REPORT_DATA_RACES
// Capture the backtrace; report once per unique stack, free duplicates.
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
/** Race check for an 8-byte write; same fast/slow-path structure as
 *  raceCheckRead64, using the write-side checks. */
void raceCheckWrite64(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 7)) {
// Fast path: patch matching adjacent shadow words in place.
if (shadow[1]==old_shadowval)
shadow[1] = new_shadowval;
if (shadow[2]==old_shadowval)
shadow[2] = new_shadowval;
if (shadow[3]==old_shadowval)
shadow[3] = new_shadowval;
if (shadow[4]==old_shadowval)
shadow[4] = new_shadowval;
if (shadow[5]==old_shadowval)
shadow[5] = new_shadowval;
if (shadow[6]==old_shadowval)
shadow[6] = new_shadowval;
if (shadow[7]==old_shadowval)
shadow[7] = new_shadowval;
RESTORE_MODEL_FLAG(old_flag);
// Slow path: check each remaining byte individually.
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a 4-byte write; same fast/slow-path structure as
 *  raceCheckWrite64 but over 4 bytes. */
void raceCheckWrite32(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 3)) {
// Fast path: patch matching adjacent shadow words in place.
if (shadow[1]==old_shadowval)
shadow[1] = new_shadowval;
if (shadow[2]==old_shadowval)
shadow[2] = new_shadowval;
if (shadow[3]==old_shadowval)
shadow[3] = new_shadowval;
RESTORE_MODEL_FLAG(old_flag);
// Slow path: check each remaining byte individually.
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a 2-byte write; fast-paths the second byte when its
 *  shadow word matches the first byte's old encoding. */
void raceCheckWrite16(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 1)) {
if (shadow[1]==old_shadowval) {
// Fast path: patch the second byte's shadow word and return early.
shadow[1] = new_shadowval;
RESTORE_MODEL_FLAG(old_flag);
// Slow path for the second byte.
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a 1-byte write — no fast path needed. */
void raceCheckWrite8(thread_id_t thread, const void *location)
int old_flag = GET_MODEL_FLAG;
raceCheckWrite_otherIt(thread, location);
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a bulk write of `size` bytes (memcpy/memset-style).
 *  Runs the per-byte write check over the range, but reports at most ONE
 *  race for the whole operation (alreadyHasRace). */
void raceCheckWriteMemop(thread_id_t thread, const void *location, size_t size)
int old_flag = GET_MODEL_FLAG;
ClockVector *currClock = get_execution()->get_cv(thread);
if (currClock == NULL) {
RESTORE_MODEL_FLAG(old_flag);
bool alreadyHasRace = false;
for (uint i = 0; i < size; i++) {
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
struct DataRace * race = NULL;
/* Do full record */
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = fullRaceCheckWrite(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = fullRaceCheckWrite(thread, location, shadow, currClock);
/* Check for datarace against last read. */
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Compact re-encode: this plain write becomes the last access.
*shadow = ENCODEOP(0, 0, threadid, ourClock);
#ifdef REPORT_DATA_RACES
// Report only the first race found for this memop; dedup by backtrace.
if (!alreadyHasRace) {
alreadyHasRace = true;
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
RESTORE_MODEL_FLAG(old_flag);
/** Race check for a bulk read of `size` bytes (memcpy-source-style).
 *  Runs the per-byte read check over the range, but reports at most ONE
 *  race for the whole operation (alreadyHasRace). */
void raceCheckReadMemop(thread_id_t thread, const void * location, size_t size)
int old_flag = GET_MODEL_FLAG;
ClockVector *currClock = get_execution()->get_cv(thread);
if (currClock == NULL) {
RESTORE_MODEL_FLAG(old_flag);
bool alreadyHasRace = false;
for (uint i = 0; i < size; i++) {
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
struct DataRace * race = NULL;
/* Do full record */
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
race = fullRaceCheckRead(thread, location, shadow, currClock);
int threadid = id_to_int(thread);
modelclock_t ourClock = currClock->getClock(thread);
/* Thread ID is too large or clock is too large. */
if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
expandRecord(shadow);
race = fullRaceCheckRead(thread, location, shadow, currClock);
/* Check for datarace against last write. */
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Does the packed previous read still matter, or does ours subsume it?
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We don't subsume this read... Have to expand record. */
expandRecord(shadow);
struct RaceRecord *record = (struct RaceRecord *) (*shadow);
record->thread[1] = thread;
record->readClock[1] = ourClock;
// Compact re-encode: our read replaces the packed reader; atomic flag
// of the last write is preserved.
*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
#ifdef REPORT_DATA_RACES
// Report only the first race found for this memop; dedup by backtrace.
if (!alreadyHasRace) {
alreadyHasRace = true;
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
else model_free(race);
RESTORE_MODEL_FLAG(old_flag);
/** Prints the per-width counters of plain (non-atomic) loads and stores. */
void print_normal_accesses()
model_print("store 8  count: %u\n", store8_count);
model_print("store 16 count: %u\n", store16_count);
model_print("store 32 count: %u\n", store32_count);
model_print("store 64 count: %u\n", store64_count);
model_print("load 8  count: %u\n", load8_count);
model_print("load 16 count: %u\n", load16_count);
model_print("load 32 count: %u\n", load32_count);
model_print("load 64 count: %u\n", load64_count);