3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
14 static struct ShadowTable *root;	/* Top level of the multi-level shadow-memory lookup table. */
15 static void *memory_base;	/* Bump-allocation cursor into the pre-reserved shadow-table pool. */
16 static void *memory_top;	/* One past the end of the pre-reserved pool. */
17 static RaceSet * raceset;	/* Races seen so far; add() gates reporting so each race is raised once. */
/** @brief Shorthand accessor for the model checker's current execution. */
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initializes the data race detector. */
25 void initRaceDetector()
/* Allocate the root shadow table, pre-reserve a pool of base tables so
 * table_calloc() can hand them out cheaply, and create the set used to
 * deduplicate reported races. */
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
/** Zeroed allocation for shadow tables: carve from the pre-reserved pool
 *  while it has room, otherwise fall back to snapshot_calloc().  The pool
 *  was calloc'd at init, so carved chunks are already zero-filled. */
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
/* Pool exhausted -- plain snapshot allocation. */
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
/* Bump the cursor past the chunk being handed out. */
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
 * given address, allocating intermediate tables on demand. */
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
/* Walk the table: bits 47..32 select the mid-level table. */
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
/* First touch of this region: allocate the mid-level table lazily. */
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
/* Bits 31..16 select the base table within the mid-level table. */
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
/* Low 16 bits index the 64-bit shadow word for this address. */
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
/** Returns whether the last store recorded at this address was a plain
 *  (non-atomic) write: non-zero write clock and no atomic flag. */
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
/* Expanded record: the shadow word holds a RaceRecord pointer. */
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic && record->writeClock != 0;
/** Marks the last store recorded at @a address as an atomic store. */
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
/* Compact encoding keeps the atomic bit inline in the shadow word. */
82 *shadow = shadowval | ATOMICMASK;
/* Expanded record: the flag lives in the RaceRecord itself. */
86 struct RaceRecord *record = (struct RaceRecord *)shadowval;
/** Reports the thread id and clock of the last store recorded at
 *  @a address through the two out-parameters. */
91 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
92 uint64_t * shadow = lookupAddressEntry(address);
93 uint64_t shadowval = *shadow;
94 if (ISSHORTRECORD(shadowval)) {
95 //Compact encoding: decode writer thread/clock from the shadow word
96 *thread = WRTHREADID(shadowval);
97 *clock = WRITEVECTOR(shadowval);
/* Expanded record: read the fields out of the RaceRecord. */
99 struct RaceRecord *record = (struct RaceRecord *)shadowval;
100 *thread = record->writeThread;
101 *clock = record->writeClock;
106 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
107 * to check the potential for a data race.
108 * @param clock1 The current clock vector
109 * @param tid1 The current thread; paired with clock1
110 * @param clock2 The clock value for the potentially-racing action
111 * @param tid2 The thread ID for the potentially-racing action
112 * @return true if the current clock allows a race with the event at clock2/tid2
114 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
115 modelclock_t clock2, thread_id_t tid2)
117 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
121 * Expands a record from the compact form to the full form. This is
122 * necessary for multiple readers or for very large thread ids or clock values. */
124 static void expandRecord(uint64_t *shadow)
126 uint64_t shadowval = *shadow;
/* Decode the compact shadow word into its read/write components. */
128 modelclock_t readClock = READVECTOR(shadowval);
129 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
130 modelclock_t writeClock = WRITEVECTOR(shadowval);
131 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
133 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
134 record->writeThread = writeThread;
135 record->writeClock = writeClock;
/* Only materialize the read vectors when a read was actually recorded. */
137 if (readClock != 0) {
138 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
139 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
140 record->numReads = 1;
141 ASSERT(readThread >= 0);
142 record->thread[0] = readThread;
143 record->readClock[0] = readClock;
/* Carry the atomic flag over to the expanded record. */
145 if (shadowval & ATOMICMASK)
146 record->isAtomic = 1;
/* The shadow word now stores a pointer to the expanded record. */
147 *shadow = (uint64_t) record;
150 #define FIRST_STACK_FRAME 2
/** Hash of a race's backtrace, skipping the detector's own top frames. */
152 unsigned int race_hash(struct DataRace *race) {
153 unsigned int hash = 0;
154 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
155 hash ^= ((uintptr_t)race->backtrace[i]);
/* Rotate right by 3 bits to mix frame order into the hash. */
156 hash = (hash >> 3) | (hash << 29);
/** Two races compare equal when their backtraces match frame-for-frame
 *  (again ignoring the detector's own top frames). */
162 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
163 if (r1->numframes != r2->numframes)
165 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
166 if (r1->backtrace[i] != r2->backtrace[i])
172 /** This function is called when we detect a data race.*/
/* Allocates (via model_malloc) and fills a DataRace describing the two
 * conflicting accesses; the callers attach the backtrace afterwards and
 * model_free() it if it turns out to be a duplicate. */
173 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
175 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
176 race->oldthread = oldthread;
177 race->oldclock = oldclock;
178 race->isoldwrite = isoldwrite;
179 race->newaction = newaction;
180 race->isnewwrite = isnewwrite;
181 race->address = address;
186 * @brief Assert a data race
188 * Asserts a data race which is currently realized, causing the execution to
189 * end and stashing a message in the model-checker's bug list
191 * @param race The race to report
193 void assert_race(struct DataRace *race)
195 model_print("Race detected at location: \n");
/* Symbolized stack trace of the racing access. */
196 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
/* Summarize both accesses: kind (read/write), thread, and clock. */
197 model_print("\nData race detected @ address %p:\n"
198 " Access 1: %5s in thread %2d @ clock %3u\n"
199 " Access 2: %5s in thread %2d @ clock %3u\n\n",
201 race->isoldwrite ? "write" : "read",
202 id_to_int(race->oldthread),
204 race->isnewwrite ? "write" : "read",
205 id_to_int(race->newaction->get_tid()),
206 race->newaction->get_seq_number()
210 /** This function does race detection for a write on an expanded record. */
/* Returns a newly allocated DataRace if this write races with a recorded
 * read or the last write, NULL otherwise.  Always re-records this write. */
211 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
213 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
214 struct DataRace * race = NULL;
216 /* Check for datarace against last read. */
218 for (int i = 0;i < record->numReads;i++) {
219 modelclock_t readClock = record->readClock[i];
220 thread_id_t readThread = record->thread[i];
222 /* Note that readClock can't actually be zero here, so it could be
225 if (clock_may_race(currClock, thread, readClock, readThread)) {
226 /* We have a datarace */
227 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
232 /* Check for datarace against last write. */
235 modelclock_t writeClock = record->writeClock;
236 thread_id_t writeThread = record->writeThread;
238 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
239 /* We have a datarace */
240 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* This write supersedes everything recorded so far: clear the read
 * vector and make this thread the last (non-atomic) writer. */
245 record->numReads = 0;
246 record->writeThread = thread;
247 record->isAtomic = 0;
248 modelclock_t ourClock = currClock->getClock(thread);
249 record->writeClock = ourClock;
253 /** This function does race detection on a write. */
254 void raceCheckWrite(thread_id_t thread, void *location)
256 uint64_t *shadow = lookupAddressEntry(location);
257 uint64_t shadowval = *shadow;
258 ClockVector *currClock = get_execution()->get_cv(thread);
/* No clock vector yet for this thread: nothing to compare against. */
259 if (currClock == NULL)
262 struct DataRace * race = NULL;
/* Already-expanded record: take the slow path. */
264 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
265 race = fullRaceCheckWrite(thread, location, shadow, currClock);
270 int threadid = id_to_int(thread);
271 modelclock_t ourClock = currClock->getClock(thread);
273 /* Thread ID is too large or clock is too large. */
274 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
275 expandRecord(shadow);
276 race = fullRaceCheckWrite(thread, location, shadow, currClock);
283 /* Check for datarace against last read. */
285 modelclock_t readClock = READVECTOR(shadowval);
286 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
288 if (clock_may_race(currClock, thread, readClock, readThread)) {
289 /* We have a datarace */
290 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
296 /* Check for datarace against last write. */
298 modelclock_t writeClock = WRITEVECTOR(shadowval);
299 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
301 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
302 /* We have a datarace */
303 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Re-record in compact form: read info cleared, non-atomic write. */
309 *shadow = ENCODEOP(0, 0, threadid, ourClock);
/* Attach the current backtrace; report newly-seen races, free duplicates. */
314 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
315 if (raceset->add(race))
317 else model_free(race);
322 /** This function does race detection for a write on an expanded record. */
/* Atomic-write variant: an atomic write cannot race with a previous
 * atomic access, so the checks are skipped when the record is atomic. */
323 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
325 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
326 struct DataRace * race = NULL;
/* Last access already atomic: no race checks needed. */
328 if (record->isAtomic)
331 /* Check for datarace against last read. */
333 for (int i = 0;i < record->numReads;i++) {
334 modelclock_t readClock = record->readClock[i];
335 thread_id_t readThread = record->thread[i];
337 /* Note that readClock can't actually be zero here, so it could be
340 if (clock_may_race(currClock, thread, readClock, readThread)) {
341 /* We have a datarace */
342 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
347 /* Check for datarace against last write. */
350 modelclock_t writeClock = record->writeClock;
351 thread_id_t writeThread = record->writeThread;
353 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
354 /* We have a datarace */
355 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Record this thread as the last writer, flagged atomic. */
360 record->numReads = 0;
361 record->writeThread = thread;
362 record->isAtomic = 1;
363 modelclock_t ourClock = currClock->getClock(thread);
364 record->writeClock = ourClock;
368 /** This function does race detection on a write. */
/* Atomic-write entry point. */
369 void atomraceCheckWrite(thread_id_t thread, void *location)
371 uint64_t *shadow = lookupAddressEntry(location);
372 uint64_t shadowval = *shadow;
373 ClockVector *currClock = get_execution()->get_cv(thread);
/* No clock vector yet for this thread: nothing to compare against. */
374 if (currClock == NULL)
377 struct DataRace * race = NULL;
/* Already-expanded record: take the slow path. */
379 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
380 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
385 int threadid = id_to_int(thread);
386 modelclock_t ourClock = currClock->getClock(thread);
388 /* Thread ID is too large or clock is too large. */
389 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
390 expandRecord(shadow);
391 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
395 /* Can't race with atomic */
396 if (shadowval & ATOMICMASK)
400 /* Check for datarace against last read. */
402 modelclock_t readClock = READVECTOR(shadowval);
403 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
405 if (clock_may_race(currClock, thread, readClock, readThread)) {
406 /* We have a datarace */
407 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
413 /* Check for datarace against last write. */
415 modelclock_t writeClock = WRITEVECTOR(shadowval);
416 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
418 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
419 /* We have a datarace */
420 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
/* Re-record in compact form with the atomic flag set. */
426 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
/* Attach the current backtrace; report newly-seen races, free duplicates. */
431 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
432 if (raceset->add(race))
434 else model_free(race);
438 /** This function does race detection for a write on an expanded record. */
439 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
440 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
441 record->numReads = 0;
442 record->writeThread = thread;
443 modelclock_t ourClock = currClock->getClock(thread);
444 record->writeClock = ourClock;
445 record->isAtomic = 1;
448 /** This function does race detection for a write on an expanded record. */
449 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
450 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
451 record->numReads = 0;
452 record->writeThread = thread;
453 modelclock_t ourClock = currClock->getClock(thread);
454 record->writeClock = ourClock;
455 record->isAtomic = 0;
458 /** This function just updates metadata on atomic write. */
459 void recordWrite(thread_id_t thread, void *location) {
460 uint64_t *shadow = lookupAddressEntry(location);
461 uint64_t shadowval = *shadow;
462 ClockVector *currClock = get_execution()->get_cv(thread);
/* Already-expanded record: update it directly. */
464 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
465 fullRecordWrite(thread, location, shadow, currClock);
469 int threadid = id_to_int(thread);
470 modelclock_t ourClock = currClock->getClock(thread);
472 /* Thread ID is too large or clock is too large. */
473 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
474 expandRecord(shadow);
475 fullRecordWrite(thread, location, shadow, currClock);
/* Compact re-encode as an atomic write by this thread. */
479 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
482 /** Marks each of the @a size bytes starting at @a location as having been
 *  (non-atomically) written by the current thread, e.g. after a calloc. */
483 void recordCalloc(void *location, size_t size) {
484 thread_id_t thread = thread_current()->get_id();
485 for(;size != 0;size--) {
486 uint64_t *shadow = lookupAddressEntry(location);
487 uint64_t shadowval = *shadow;
/* NOTE(review): currClock is loop-invariant and could be hoisted. */
488 ClockVector *currClock = get_execution()->get_cv(thread);
/* Already-expanded record: update it directly. */
490 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
491 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
495 int threadid = id_to_int(thread);
496 modelclock_t ourClock = currClock->getClock(thread);
498 /* Thread ID is too large or clock is too large. */
499 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
500 expandRecord(shadow);
501 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
/* Compact re-encode as a non-atomic write by this thread. */
505 *shadow = ENCODEOP(0, 0, threadid, ourClock);
/* Advance to the next byte's shadow entry. */
506 location = (void *)(((char *) location) + 1);
512 /** This function does race detection on a read for an expanded record. */
513 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
515 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
516 struct DataRace * race = NULL;
517 /* Check for datarace against last write. */
519 modelclock_t writeClock = record->writeClock;
520 thread_id_t writeThread = record->writeThread;
522 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
523 /* We have a datarace */
524 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
527 /* Shorten vector when possible */
531 for (int i = 0;i < record->numReads;i++) {
532 modelclock_t readClock = record->readClock[i];
533 thread_id_t readThread = record->thread[i];
535 /* Note that is not really a datarace check as reads cannot
536 actually race. It is just determining that this read subsumes
537 another in the sense that either this read races or neither
538 read races. Note that readClock can't actually be zero, so it
539 could be optimized. */
541 if (clock_may_race(currClock, thread, readClock, readThread)) {
542 /* Still need this read in vector */
543 if (copytoindex != i) {
544 ASSERT(record->thread[i] >= 0);
545 record->readClock[copytoindex] = record->readClock[i];
546 record->thread[copytoindex] = record->thread[i];
552 if (__builtin_popcount(copytoindex) <= 1) {
553 if (copytoindex == 0) {
554 int newCapacity = INITCAPACITY;
555 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
556 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
557 } else if (copytoindex>=INITCAPACITY) {
558 int newCapacity = copytoindex * 2;
559 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
560 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
561 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
562 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
563 snapshot_free(record->readClock);
564 snapshot_free(record->thread);
565 record->readClock = newreadClock;
566 record->thread = newthread;
570 modelclock_t ourClock = currClock->getClock(thread);
573 record->thread[copytoindex] = thread;
574 record->readClock[copytoindex] = ourClock;
575 record->numReads = copytoindex + 1;
579 /** This function does race detection on a read. */
580 void raceCheckRead(thread_id_t thread, const void *location)
582 uint64_t *shadow = lookupAddressEntry(location);
583 uint64_t shadowval = *shadow;
584 ClockVector *currClock = get_execution()->get_cv(thread);
/* No clock vector yet for this thread: nothing to compare against. */
585 if (currClock == NULL)
588 struct DataRace * race = NULL;
/* Already-expanded record: take the slow path. */
591 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
592 race = fullRaceCheckRead(thread, location, shadow, currClock);
597 int threadid = id_to_int(thread);
598 modelclock_t ourClock = currClock->getClock(thread);
600 /* Thread ID is too large or clock is too large. */
601 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
602 expandRecord(shadow);
603 race = fullRaceCheckRead(thread, location, shadow, currClock);
607 /* Check for datarace against last write. */
609 modelclock_t writeClock = WRITEVECTOR(shadowval);
610 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
612 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
613 /* We have a datarace */
614 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/* The compact encoding only holds one read; if the recorded read is not
 * subsumed by this one, the record must be expanded to keep both. */
620 modelclock_t readClock = READVECTOR(shadowval);
621 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
623 if (clock_may_race(currClock, thread, readClock, readThread)) {
624 /* We don't subsume this read... Have to expand record. */
625 expandRecord(shadow);
626 fullRaceCheckRead(thread, location, shadow, currClock);
/* Re-encode: this thread becomes the recorded reader; the last writer
 * and the atomic flag are preserved. */
631 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
/* Attach the current backtrace; report newly-seen races, free duplicates. */
635 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
636 if (raceset->add(race))
638 else model_free(race);
643 /** This function does race detection on a read for an expanded record. */
/* Atomic-read variant: only a race with the last (non-atomic) write is
 * possible. */
644 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
646 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
647 struct DataRace * race = NULL;
648 /* Check for datarace against last write. */
/* Last access already atomic: an atomic read cannot race with it. */
649 if (record->isAtomic)
652 modelclock_t writeClock = record->writeClock;
653 thread_id_t writeThread = record->writeThread;
655 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
656 /* We have a datarace */
657 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
662 /** This function does race detection on a read. */
/* Atomic-read entry point: only races with a previous non-atomic write. */
663 void atomraceCheckRead(thread_id_t thread, const void *location)
665 uint64_t *shadow = lookupAddressEntry(location);
666 uint64_t shadowval = *shadow;
667 ClockVector *currClock = get_execution()->get_cv(thread);
/* No clock vector yet for this thread: nothing to compare against. */
668 if (currClock == NULL)
671 struct DataRace * race = NULL;
/* Already-expanded record: take the slow path. */
674 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
675 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
/* Last recorded access was atomic: cannot race with this atomic read. */
679 if (shadowval & ATOMICMASK)
683 /* Check for datarace against last write. */
685 modelclock_t writeClock = WRITEVECTOR(shadowval);
686 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
688 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
689 /* We have a datarace */
690 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/* Attach the current backtrace; report newly-seen races, free duplicates. */
698 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
699 if (raceset->add(race))
701 else model_free(race);