3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
// File-scope state of the data-race detector (allocated in initRaceDetector).
// Root of the multi-level shadow table mapping addresses to shadow words.
14 static struct ShadowTable *root;
// Bump-allocation pool consumed by table_calloc(); base grows toward top.
15 static void *memory_base;
16 static void *memory_top;
// Set of already-reported races; raceset->add() returning false means the
// race was a duplicate (presumably keyed by race_hash/race_equals — confirm
// against the RaceSet implementation).
17 static RaceSet * raceset;
// Convenience accessor for the model checker's current execution object.
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
// given address. The shadow map is a lazily-built trie: upper address bits
// select ShadowTable levels, the low 16 bits index the leaf ShadowBaseTable.
// Returns a pointer to the 64-bit shadow word for `address`.
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
// Descend one level keyed by address bits 32-47, allocating it on first use.
// NOTE(review): the lines around this walk are elided in this capture —
// presumably a conditional-compilation guard for 48-bit address spaces;
// confirm against the original before editing.
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
// Leaf level keyed by address bits 16-31, also allocated on demand.
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
// Low 16 bits select the shadow word within the base table.
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
// Reports whether the most recent store recorded at `address` was non-atomic.
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
// Packed record: the ATOMICMASK bit marks the last write as atomic.
69 return !(ATOMICMASK & shadowval);
// Expanded record: consult the out-of-line RaceRecord's flag.
// NOTE(review): a shadowval == 0 (never-stored) guard appears to sit in the
// elided lines before this cast — confirm before relying on this path, since
// casting 0 here would dereference a null RaceRecord.
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic;
// Marks the most recent store at `address` as atomic in the shadow map.
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
// Packed record: set the atomic bit in place.
82 *shadow = shadowval | ATOMICMASK;
// Never-touched address (shadowval == 0): install an empty packed record
// carrying only the atomic bit.
85 *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
// Expanded record: flag it atomic. The actual assignment is in the elided
// line that follows — presumably record->isAtomic = 1; confirm.
88 struct RaceRecord *record = (struct RaceRecord *)shadowval;
// Returns, via the out-parameters, the thread and clock of the last write
// recorded at `address`.
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94 uint64_t * shadow = lookupAddressEntry(address);
95 uint64_t shadowval = *shadow;
// Packed record, or never written: decode from the bit fields (both decode
// to zero when shadowval == 0).
96 if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97 //Do we have a non atomic write with a non-zero clock
98 *thread = WRTHREADID(shadowval);
99 *clock = WRITEVECTOR(shadowval);
// Expanded record: read the stored writer fields directly.
101 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102 *thread = record->writeThread;
103 *clock = record->writeClock;
108 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109 * to check the potential for a data race.
110 * @param clock1 The current clock vector
111 * @param tid1 The current thread; paired with clock1
112 * @param clock2 The clock value for the potentially-racing action
113 * @param tid2 The thread ID for the potentially-racing action
114 * @return true if the current clock allows a race with the event at clock2/tid2
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117 modelclock_t clock2, thread_id_t tid2)
119 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
123 * Expands a record from the compact form to the full form. This is
124 * necessary for multiple readers or for very large thread ids or time
126 static void expandRecord(uint64_t *shadow)
128 uint64_t shadowval = *shadow;
// Decode the packed reader and writer (thread, clock) pairs.
130 modelclock_t readClock = READVECTOR(shadowval);
131 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132 modelclock_t writeClock = WRITEVECTOR(shadowval);
133 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
// Build the out-of-line record and copy the writer information over.
135 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136 record->writeThread = writeThread;
137 record->writeClock = writeClock;
// A non-zero read clock means the packed form held one reader: seed the
// growable reader arrays with it.
139 if (readClock != 0) {
140 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142 record->numReads = 1;
143 ASSERT(readThread >= 0);
144 record->thread[0] = readThread;
145 record->readClock[0] = readClock;
// Preserve the atomic flag, then publish the pointer as the shadow value
// (distinguished from packed records by ISSHORTRECORD).
147 if (shadowval & ATOMICMASK)
148 record->isAtomic = 1;
149 *shadow = (uint64_t) record;
152 #define FIRST_STACK_FRAME 2
154 unsigned int race_hash(struct DataRace *race) {
155 unsigned int hash = 0;
156 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157 hash ^= ((uintptr_t)race->backtrace[i]);
158 hash = (hash >> 3) | (hash << 29);
164 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
165 if (r1->numframes != r2->numframes)
167 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
168 if (r1->backtrace[i] != r2->backtrace[i])
174 /** This function is called when we detect a data race.*/
175 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
177 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
178 race->oldthread = oldthread;
179 race->oldclock = oldclock;
180 race->isoldwrite = isoldwrite;
181 race->newaction = newaction;
182 race->isnewwrite = isnewwrite;
183 race->address = address;
188 * @brief Assert a data race
190 * Asserts a data race which is currently realized, causing the execution to
191 * end and stashing a message in the model-checker's bug list
193 * @param race The race to report
195 void assert_race(struct DataRace *race)
197 model_print("Race detected at location: \n");
// Symbolize and print the captured stack, then a two-line summary naming
// both accesses (kind, thread, clock).
198 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
199 model_print("\nData race detected @ address %p:\n"
200 "	Access 1: %5s in thread %2d @ clock %3u\n"
201 "	Access 2: %5s in thread %2d @ clock %3u\n\n",
// NOTE(review): the %p and Access-1 clock arguments (presumably
// race->address and race->oldclock) sit in elided lines here — confirm.
203 race->isoldwrite ? "write" : "read",
204 id_to_int(race->oldthread),
206 race->isnewwrite ? "write" : "read",
207 id_to_int(race->newaction->get_tid()),
208 race->newaction->get_seq_number()
212 /** This function does race detection for a write on an expanded record. */
// Returns the last race found (NULL if none); the caller captures the
// backtrace and handles reporting/deduplication.
213 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
215 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
216 struct DataRace * race = NULL;
218 /* Check for datarace against last read. */
// Any recorded read not ordered before this write races with it.
220 for (int i = 0;i < record->numReads;i++) {
221 modelclock_t readClock = record->readClock[i];
222 thread_id_t readThread = record->thread[i];
224 /* Note that readClock can't actuall be zero here, so it could be
227 if (clock_may_race(currClock, thread, readClock, readThread)) {
228 /* We have a datarace */
229 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
234 /* Check for datarace against last write. */
237 modelclock_t writeClock = record->writeClock;
238 thread_id_t writeThread = record->writeThread;
240 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
241 /* We have a datarace */
242 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// This write supersedes all earlier accesses: drop the read set and stamp
// ourselves as the last (non-atomic) writer.
247 record->numReads = 0;
248 record->writeThread = thread;
249 record->isAtomic = 0;
250 modelclock_t ourClock = currClock->getClock(thread);
251 record->writeClock = ourClock;
255 /** This function does race detection on a write. */
// Entry point for a plain (non-atomic) write by `thread` to `location`.
256 void raceCheckWrite(thread_id_t thread, void *location)
258 uint64_t *shadow = lookupAddressEntry(location);
259 uint64_t shadowval = *shadow;
260 ClockVector *currClock = get_execution()->get_cv(thread);
// No clock vector yet for this thread: nothing to compare against.
261 if (currClock == NULL)
264 struct DataRace * race = NULL;
// Already-expanded record: delegate entirely to the full checker.
266 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
267 race = fullRaceCheckWrite(thread, location, shadow, currClock);
272 int threadid = id_to_int(thread);
273 modelclock_t ourClock = currClock->getClock(thread);
275 /* Thread ID is too large or clock is too large. */
// The packed encoding cannot hold these values: expand, then full-check.
276 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
277 expandRecord(shadow);
278 race = fullRaceCheckWrite(thread, location, shadow, currClock);
285 /* Check for datarace against last read. */
287 modelclock_t readClock = READVECTOR(shadowval);
288 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
290 if (clock_may_race(currClock, thread, readClock, readThread)) {
291 /* We have a datarace */
292 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
298 /* Check for datarace against last write. */
300 modelclock_t writeClock = WRITEVECTOR(shadowval);
301 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
303 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
304 /* We have a datarace */
305 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Repack: this write replaces all prior accesses in the shadow word
// (readers cleared, atomic bit cleared).
311 *shadow = ENCODEOP(0, 0, threadid, ourClock);
// Reporting tail (reached when a race was found): capture the backtrace,
// report only the first occurrence of each distinct race, free duplicates.
316 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
317 if (raceset->add(race))
319 else model_free(race);
324 /** This function does race detection for a write on an expanded record. */
// Variant of fullRaceCheckWrite for an ATOMIC write: atomic-vs-atomic
// accesses cannot race, and the record is left marked atomic.
325 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
327 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
328 struct DataRace * race = NULL;
// Previous access already atomic: no race possible against it; skip the
// checks (the elided control flow presumably jumps to the update below).
330 if (record->isAtomic)
333 /* Check for datarace against last read. */
335 for (int i = 0;i < record->numReads;i++) {
336 modelclock_t readClock = record->readClock[i];
337 thread_id_t readThread = record->thread[i];
339 /* Note that readClock can't actuall be zero here, so it could be
342 if (clock_may_race(currClock, thread, readClock, readThread)) {
343 /* We have a datarace */
344 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
349 /* Check for datarace against last write. */
352 modelclock_t writeClock = record->writeClock;
353 thread_id_t writeThread = record->writeThread;
355 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
356 /* We have a datarace */
357 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Record this atomic write as the latest access.
362 record->numReads = 0;
363 record->writeThread = thread;
364 record->isAtomic = 1;
365 modelclock_t ourClock = currClock->getClock(thread);
366 record->writeClock = ourClock;
370 /** This function does race detection on a write. */
// Entry point for an ATOMIC write by `thread` to `location`.
371 void atomraceCheckWrite(thread_id_t thread, void *location)
373 uint64_t *shadow = lookupAddressEntry(location);
374 uint64_t shadowval = *shadow;
375 ClockVector *currClock = get_execution()->get_cv(thread);
376 if (currClock == NULL)
379 struct DataRace * race = NULL;
// Already-expanded record: delegate to the atomic full checker.
381 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
382 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
387 int threadid = id_to_int(thread);
388 modelclock_t ourClock = currClock->getClock(thread);
390 /* Thread ID is too large or clock is too large. */
391 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
392 expandRecord(shadow);
393 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
397 /* Can't race with atomic */
// Prior packed access was atomic: skip the race checks entirely.
398 if (shadowval & ATOMICMASK)
402 /* Check for datarace against last read. */
404 modelclock_t readClock = READVECTOR(shadowval);
405 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
407 if (clock_may_race(currClock, thread, readClock, readThread)) {
408 /* We have a datarace */
409 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
415 /* Check for datarace against last write. */
417 modelclock_t writeClock = WRITEVECTOR(shadowval);
418 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
420 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
421 /* We have a datarace */
422 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
// Repack with the atomic bit set — unlike raceCheckWrite's repack.
428 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
// Reporting tail: capture backtrace, dedupe via raceset, free duplicates.
433 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
434 if (raceset->add(race))
436 else model_free(race);
440 /** This function does race detection for a write on an expanded record. */
441 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
442 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
443 record->numReads = 0;
444 record->writeThread = thread;
445 modelclock_t ourClock = currClock->getClock(thread);
446 record->writeClock = ourClock;
447 record->isAtomic = 1;
450 /** This function does race detection for a write on an expanded record. */
451 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
452 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
453 record->numReads = 0;
454 record->writeThread = thread;
455 modelclock_t ourClock = currClock->getClock(thread);
456 record->writeClock = ourClock;
457 record->isAtomic = 0;
460 /** This function just updates metadata on atomic write. */
// Records an atomic write without any race checking (metadata only).
461 void recordWrite(thread_id_t thread, void *location) {
462 uint64_t *shadow = lookupAddressEntry(location);
463 uint64_t shadowval = *shadow;
464 ClockVector *currClock = get_execution()->get_cv(thread);
// Expanded record: update it in place.
466 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
467 fullRecordWrite(thread, location, shadow, currClock);
471 int threadid = id_to_int(thread);
472 modelclock_t ourClock = currClock->getClock(thread);
474 /* Thread ID is too large or clock is too large. */
// Values do not fit the packed encoding: expand, then record.
475 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
476 expandRecord(shadow);
477 fullRecordWrite(thread, location, shadow, currClock);
// Packed path: replace the shadow word with this write, atomic bit set.
481 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
484 /** This function just updates metadata on atomic write. */
// NOTE(review): comment looks copy-pasted — this records `size` bytes
// starting at `location` as NON-atomic writes by the current thread
// (used for calloc'd memory), one shadow word per byte, no race checking.
485 void recordCalloc(void *location, size_t size) {
486 thread_id_t thread = thread_current()->get_id();
// Walk the range byte by byte; `location` is advanced at the loop tail.
487 for(;size != 0;size--) {
488 uint64_t *shadow = lookupAddressEntry(location);
489 uint64_t shadowval = *shadow;
490 ClockVector *currClock = get_execution()->get_cv(thread);
// Expanded record: update in place, non-atomic.
492 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
493 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
497 int threadid = id_to_int(thread);
498 modelclock_t ourClock = currClock->getClock(thread);
500 /* Thread ID is too large or clock is too large. */
501 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
502 expandRecord(shadow);
503 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
// Packed path: plain (non-atomic) write encoding, no ATOMICMASK.
507 *shadow = ENCODEOP(0, 0, threadid, ourClock);
508 location = (void *)(((char *) location) + 1);
514 /** This function does race detection on a read for an expanded record. */
// Checks this read against the last write, compacts the stored read set
// (dropping reads subsumed by this one), and appends this read.
// Returns the race found, or NULL.
515 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
517 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
518 struct DataRace * race = NULL;
519 /* Check for datarace against last write. */
521 modelclock_t writeClock = record->writeClock;
522 thread_id_t writeThread = record->writeThread;
524 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
525 /* We have a datarace */
526 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
529 /* Shorten vector when possible */
// Compaction: keep only the reads that could still race independently of
// this one; `copytoindex` (declared in an elided line) is the write cursor.
533 for (int i = 0;i < record->numReads;i++) {
534 modelclock_t readClock = record->readClock[i];
535 thread_id_t readThread = record->thread[i];
537 /* Note that is not really a datarace check as reads cannot
538 actually race. It is just determining that this read subsumes
539 another in the sense that either this read races or neither
540 read races. Note that readClock can't actually be zero, so it
541 could be optimized. */
543 if (clock_may_race(currClock, thread, readClock, readThread)) {
544 /* Still need this read in vector */
545 if (copytoindex != i) {
546 ASSERT(record->thread[i] >= 0);
547 record->readClock[copytoindex] = record->readClock[i];
548 record->thread[copytoindex] = record->thread[i];
// Capacity management: popcount(copytoindex) <= 1 means the surviving
// count is 0 or an exact power of two — i.e. the arrays are full.
554 if (__builtin_popcount(copytoindex) <= 1) {
// Empty set: (re)allocate at the initial capacity.
555 if (copytoindex == 0) {
556 int newCapacity = INITCAPACITY;
557 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
558 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
// Full at a power of two >= INITCAPACITY: double, copy, free old arrays.
559 } else if (copytoindex>=INITCAPACITY) {
560 int newCapacity = copytoindex * 2;
561 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
562 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
563 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
564 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
565 snapshot_free(record->readClock);
566 snapshot_free(record->thread);
567 record->readClock = newreadClock;
568 record->thread = newthread;
// Append the current read to the (compacted) set.
572 modelclock_t ourClock = currClock->getClock(thread);
575 record->thread[copytoindex] = thread;
576 record->readClock[copytoindex] = ourClock;
577 record->numReads = copytoindex + 1;
581 /** This function does race detection on a read. */
// Entry point for a plain (non-atomic) read by `thread` of `location`.
582 void raceCheckRead(thread_id_t thread, const void *location)
584 uint64_t *shadow = lookupAddressEntry(location);
585 uint64_t shadowval = *shadow;
586 ClockVector *currClock = get_execution()->get_cv(thread);
587 if (currClock == NULL)
590 struct DataRace * race = NULL;
// Already-expanded record: delegate to the full checker.
593 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
594 race = fullRaceCheckRead(thread, location, shadow, currClock);
599 int threadid = id_to_int(thread);
600 modelclock_t ourClock = currClock->getClock(thread);
602 /* Thread ID is too large or clock is too large. */
603 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
604 expandRecord(shadow);
605 race = fullRaceCheckRead(thread, location, shadow, currClock);
609 /* Check for datarace against last write. */
611 modelclock_t writeClock = WRITEVECTOR(shadowval);
612 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
614 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
615 /* We have a datarace */
616 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// The packed form holds only one reader: if the existing read could still
// race (is not subsumed by ours), we must expand to keep both.
622 modelclock_t readClock = READVECTOR(shadowval);
623 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
625 if (clock_may_race(currClock, thread, readClock, readThread)) {
626 /* We don't subsume this read... Have to expand record. */
627 expandRecord(shadow);
628 fullRaceCheckRead(thread, location, shadow, currClock);
// Otherwise repack: our read replaces the old one; the last write and the
// atomic bit are preserved.
633 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
// Reporting tail: capture backtrace, dedupe via raceset, free duplicates.
637 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
638 if (raceset->add(race))
640 else model_free(race);
645 /** This function does race detection on a read for an expanded record. */
// Atomic-read variant: only the last write can race, and not if that write
// was itself atomic. Unlike fullRaceCheckRead, this does not touch the
// stored read set. Returns the race found, or NULL.
646 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
648 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
649 struct DataRace * race = NULL;
650 /* Check for datarace against last write. */
// Last write was atomic: an atomic read cannot race with it.
651 if (record->isAtomic)
654 modelclock_t writeClock = record->writeClock;
655 thread_id_t writeThread = record->writeThread;
657 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
658 /* We have a datarace */
659 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
664 /** This function does race detection on a read. */
// Entry point for an ATOMIC read by `thread` of `location`.
665 void atomraceCheckRead(thread_id_t thread, const void *location)
667 uint64_t *shadow = lookupAddressEntry(location);
668 uint64_t shadowval = *shadow;
669 ClockVector *currClock = get_execution()->get_cv(thread);
670 if (currClock == NULL)
673 struct DataRace * race = NULL;
// Already-expanded record: delegate to the atomic full checker.
676 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
677 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
// Packed record with the atomic bit set: last write was atomic, no race.
681 if (shadowval & ATOMICMASK)
685 /* Check for datarace against last write. */
687 modelclock_t writeClock = WRITEVECTOR(shadowval);
688 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
690 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
691 /* We have a datarace */
692 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
// Reporting tail: capture backtrace, dedupe via raceset, free duplicates.
// Note: no shadow-word update here — an atomic read leaves the record as-is.
700 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
701 if (raceset->add(race))
703 else model_free(race);