3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the two-level shadow table mapping application addresses to race metadata. */
static struct ShadowTable *root;
/* Bump-allocation pool for shadow (base) tables; valid range is [memory_base, memory_top). */
static void *memory_base;
static void *memory_top;
/* Set of races reported so far — presumably deduplicated via race_hash/race_equals; verify against RaceSet. */
static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
65 * to check the potential for a data race.
66 * @param clock1 The current clock vector
67 * @param tid1 The current thread; paired with clock1
68 * @param clock2 The clock value for the potentially-racing action
69 * @param tid2 The thread ID for the potentially-racing action
70 * @return true if the current clock allows a race with the event at clock2/tid2
72 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
73 modelclock_t clock2, thread_id_t tid2)
75 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
79 * Expands a record from the compact form to the full form. This is
80 * necessary for multiple readers or for very large thread ids or time
82 static void expandRecord(uint64_t *shadow)
84 uint64_t shadowval = *shadow;
86 modelclock_t readClock = READVECTOR(shadowval);
87 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
88 modelclock_t writeClock = WRITEVECTOR(shadowval);
89 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
91 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
92 record->writeThread = writeThread;
93 record->writeClock = writeClock;
96 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
97 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
99 ASSERT(readThread >= 0);
100 record->thread[0] = readThread;
101 record->readClock[0] = readClock;
103 if (shadowval & ATOMICMASK)
104 record->isAtomic = 1;
105 *shadow = (uint64_t) record;
108 #define FIRST_STACK_FRAME 2
110 unsigned int race_hash(struct DataRace *race) {
111 unsigned int hash = 0;
112 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
113 hash ^= ((uintptr_t)race->backtrace[i]);
114 hash = (hash >> 3) | (hash << 29);
120 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
121 if (r1->numframes != r2->numframes)
123 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
124 if (r1->backtrace[i] != r2->backtrace[i])
130 /** This function is called when we detect a data race.*/
131 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
133 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
134 race->oldthread = oldthread;
135 race->oldclock = oldclock;
136 race->isoldwrite = isoldwrite;
137 race->newaction = newaction;
138 race->isnewwrite = isnewwrite;
139 race->address = address;
144 * @brief Assert a data race
146 * Asserts a data race which is currently realized, causing the execution to
147 * end and stashing a message in the model-checker's bug list
149 * @param race The race to report
151 void assert_race(struct DataRace *race)
153 model_print("At location: \n");
154 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
155 model_print("Data race detected @ address %p:\n"
156 " Access 1: %5s in thread %2d @ clock %3u\n"
157 " Access 2: %5s in thread %2d @ clock %3u",
159 race->isoldwrite ? "write" : "read",
160 id_to_int(race->oldthread),
162 race->isnewwrite ? "write" : "read",
163 id_to_int(race->newaction->get_tid()),
164 race->newaction->get_seq_number()
168 /** This function does race detection for a write on an expanded record. */
169 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
171 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
172 struct DataRace * race = NULL;
174 /* Check for datarace against last read. */
176 for (int i = 0;i < record->numReads;i++) {
177 modelclock_t readClock = record->readClock[i];
178 thread_id_t readThread = record->thread[i];
180 /* Note that readClock can't actuall be zero here, so it could be
183 if (clock_may_race(currClock, thread, readClock, readThread)) {
184 /* We have a datarace */
185 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
190 /* Check for datarace against last write. */
193 modelclock_t writeClock = record->writeClock;
194 thread_id_t writeThread = record->writeThread;
196 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
197 /* We have a datarace */
198 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
203 record->numReads = 0;
204 record->writeThread = thread;
205 record->isAtomic = 0;
206 modelclock_t ourClock = currClock->getClock(thread);
207 record->writeClock = ourClock;
211 /** This function does race detection on a write. */
212 void raceCheckWrite(thread_id_t thread, void *location)
214 uint64_t *shadow = lookupAddressEntry(location);
215 uint64_t shadowval = *shadow;
216 ClockVector *currClock = get_execution()->get_cv(thread);
217 struct DataRace * race = NULL;
219 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
220 race = fullRaceCheckWrite(thread, location, shadow, currClock);
225 int threadid = id_to_int(thread);
226 modelclock_t ourClock = currClock->getClock(thread);
228 /* Thread ID is too large or clock is too large. */
229 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
230 expandRecord(shadow);
231 race = fullRaceCheckWrite(thread, location, shadow, currClock);
238 /* Check for datarace against last read. */
240 modelclock_t readClock = READVECTOR(shadowval);
241 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
243 if (clock_may_race(currClock, thread, readClock, readThread)) {
244 /* We have a datarace */
245 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
251 /* Check for datarace against last write. */
253 modelclock_t writeClock = WRITEVECTOR(shadowval);
254 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
256 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
257 /* We have a datarace */
258 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
264 *shadow = ENCODEOP(0, 0, threadid, ourClock);
269 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
270 if (raceset->add(race))
272 else model_free(race);
276 /** This function does race detection for a write on an expanded record. */
277 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
278 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
279 record->numReads = 0;
280 record->writeThread = thread;
281 modelclock_t ourClock = currClock->getClock(thread);
282 record->writeClock = ourClock;
283 record->isAtomic = 1;
286 /** This function just updates metadata on atomic write. */
287 void recordWrite(thread_id_t thread, void *location) {
288 uint64_t *shadow = lookupAddressEntry(location);
289 uint64_t shadowval = *shadow;
290 ClockVector *currClock = get_execution()->get_cv(thread);
292 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
293 fullRecordWrite(thread, location, shadow, currClock);
297 int threadid = id_to_int(thread);
298 modelclock_t ourClock = currClock->getClock(thread);
300 /* Thread ID is too large or clock is too large. */
301 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
302 expandRecord(shadow);
303 fullRecordWrite(thread, location, shadow, currClock);
307 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
312 /** This function does race detection on a read for an expanded record. */
313 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
315 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
316 struct DataRace * race = NULL;
317 /* Check for datarace against last write. */
319 modelclock_t writeClock = record->writeClock;
320 thread_id_t writeThread = record->writeThread;
322 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
323 /* We have a datarace */
324 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
327 /* Shorten vector when possible */
331 for (int i = 0;i < record->numReads;i++) {
332 modelclock_t readClock = record->readClock[i];
333 thread_id_t readThread = record->thread[i];
335 /* Note that is not really a datarace check as reads cannot
336 actually race. It is just determining that this read subsumes
337 another in the sense that either this read races or neither
338 read races. Note that readClock can't actually be zero, so it
339 could be optimized. */
341 if (clock_may_race(currClock, thread, readClock, readThread)) {
342 /* Still need this read in vector */
343 if (copytoindex != i) {
344 ASSERT(record->thread[i] >= 0);
345 record->readClock[copytoindex] = record->readClock[i];
346 record->thread[copytoindex] = record->thread[i];
352 if (__builtin_popcount(copytoindex) <= 1) {
353 if (copytoindex == 0) {
354 int newCapacity = INITCAPACITY;
355 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
356 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
357 } else if (copytoindex>=INITCAPACITY) {
358 int newCapacity = copytoindex * 2;
359 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
360 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
361 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
362 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
363 snapshot_free(record->readClock);
364 snapshot_free(record->thread);
365 record->readClock = newreadClock;
366 record->thread = newthread;
370 modelclock_t ourClock = currClock->getClock(thread);
373 record->thread[copytoindex] = thread;
374 record->readClock[copytoindex] = ourClock;
375 record->numReads = copytoindex + 1;
379 /** This function does race detection on a read. */
380 void raceCheckRead(thread_id_t thread, const void *location)
382 uint64_t *shadow = lookupAddressEntry(location);
383 uint64_t shadowval = *shadow;
384 ClockVector *currClock = get_execution()->get_cv(thread);
385 struct DataRace * race = NULL;
388 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
389 race = fullRaceCheckRead(thread, location, shadow, currClock);
394 int threadid = id_to_int(thread);
395 modelclock_t ourClock = currClock->getClock(thread);
397 /* Thread ID is too large or clock is too large. */
398 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
399 expandRecord(shadow);
400 race = fullRaceCheckRead(thread, location, shadow, currClock);
404 /* Check for datarace against last write. */
406 modelclock_t writeClock = WRITEVECTOR(shadowval);
407 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
409 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
410 /* We have a datarace */
411 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
417 modelclock_t readClock = READVECTOR(shadowval);
418 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
420 if (clock_may_race(currClock, thread, readClock, readThread)) {
421 /* We don't subsume this read... Have to expand record. */
422 expandRecord(shadow);
423 fullRaceCheckRead(thread, location, shadow, currClock);
428 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
432 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
433 if (raceset->add(race))
435 else model_free(race);