3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the multi-level shadow table mapping application addresses to
 * 64-bit shadow entries (see lookupAddressEntry()). */
static struct ShadowTable *root;
/* Bump-pointer region served out by table_calloc() for shadow-table nodes. */
static void *memory_base;
/* One past the end of the bump-pointer region; table_calloc() falls back to
 * snapshot_calloc() once memory_base reaches this. */
static void *memory_top;
/* Set of races reported so far — presumably deduplicated via race_hash()/
 * race_equals(); raceset->add() gates assert_race() (see raceCheckWrite). */
static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic && record->writeClock != 0;
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
82 *shadow = shadowval | ATOMICMASK;
86 struct RaceRecord *record = (struct RaceRecord *)shadowval;
91 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
92 uint64_t * shadow = lookupAddressEntry(address);
93 uint64_t shadowval = *shadow;
94 if (ISSHORTRECORD(shadowval)) {
95 //Do we have a non atomic write with a non-zero clock
96 *thread = WRTHREADID(shadowval);
97 *clock = WRITEVECTOR(shadowval);
99 struct RaceRecord *record = (struct RaceRecord *)shadowval;
100 *thread = record->writeThread;
101 *clock = record->writeClock;
106 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
107 * to check the potential for a data race.
108 * @param clock1 The current clock vector
109 * @param tid1 The current thread; paired with clock1
110 * @param clock2 The clock value for the potentially-racing action
111 * @param tid2 The thread ID for the potentially-racing action
112 * @return true if the current clock allows a race with the event at clock2/tid2
114 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
115 modelclock_t clock2, thread_id_t tid2)
117 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
121 * Expands a record from the compact form to the full form. This is
122 * necessary for multiple readers or for very large thread ids or time
124 static void expandRecord(uint64_t *shadow)
126 uint64_t shadowval = *shadow;
128 modelclock_t readClock = READVECTOR(shadowval);
129 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
130 modelclock_t writeClock = WRITEVECTOR(shadowval);
131 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
133 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
134 record->writeThread = writeThread;
135 record->writeClock = writeClock;
137 if (readClock != 0) {
138 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
139 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
140 record->numReads = 1;
141 ASSERT(readThread >= 0);
142 record->thread[0] = readThread;
143 record->readClock[0] = readClock;
145 if (shadowval & ATOMICMASK)
146 record->isAtomic = 1;
147 *shadow = (uint64_t) record;
150 #define FIRST_STACK_FRAME 2
152 unsigned int race_hash(struct DataRace *race) {
153 unsigned int hash = 0;
154 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
155 hash ^= ((uintptr_t)race->backtrace[i]);
156 hash = (hash >> 3) | (hash << 29);
162 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
163 if (r1->numframes != r2->numframes)
165 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
166 if (r1->backtrace[i] != r2->backtrace[i])
172 /** This function is called when we detect a data race.*/
173 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
175 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
176 race->oldthread = oldthread;
177 race->oldclock = oldclock;
178 race->isoldwrite = isoldwrite;
179 race->newaction = newaction;
180 race->isnewwrite = isnewwrite;
181 race->address = address;
186 * @brief Assert a data race
188 * Asserts a data race which is currently realized, causing the execution to
189 * end and stashing a message in the model-checker's bug list
191 * @param race The race to report
193 void assert_race(struct DataRace *race)
195 model_print("Race detected at location: \n");
196 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
197 model_print("\nData race detected @ address %p:\n"
198 " Access 1: %5s in thread %2d @ clock %3u\n"
199 " Access 2: %5s in thread %2d @ clock %3u\n\n",
201 race->isoldwrite ? "write" : "read",
202 id_to_int(race->oldthread),
204 race->isnewwrite ? "write" : "read",
205 id_to_int(race->newaction->get_tid()),
206 race->newaction->get_seq_number()
210 /** This function does race detection for a write on an expanded record. */
211 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
213 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
214 struct DataRace * race = NULL;
216 /* Check for datarace against last read. */
218 for (int i = 0;i < record->numReads;i++) {
219 modelclock_t readClock = record->readClock[i];
220 thread_id_t readThread = record->thread[i];
222 /* Note that readClock can't actuall be zero here, so it could be
225 if (clock_may_race(currClock, thread, readClock, readThread)) {
226 /* We have a datarace */
227 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
232 /* Check for datarace against last write. */
235 modelclock_t writeClock = record->writeClock;
236 thread_id_t writeThread = record->writeThread;
238 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
239 /* We have a datarace */
240 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
245 record->numReads = 0;
246 record->writeThread = thread;
247 record->isAtomic = 0;
248 modelclock_t ourClock = currClock->getClock(thread);
249 record->writeClock = ourClock;
253 /** This function does race detection on a write. */
254 void raceCheckWrite(thread_id_t thread, void *location)
256 uint64_t *shadow = lookupAddressEntry(location);
257 uint64_t shadowval = *shadow;
258 ClockVector *currClock = get_execution()->get_cv(thread);
259 if (currClock == NULL)
262 struct DataRace * race = NULL;
264 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
265 race = fullRaceCheckWrite(thread, location, shadow, currClock);
270 int threadid = id_to_int(thread);
271 modelclock_t ourClock = currClock->getClock(thread);
273 /* Thread ID is too large or clock is too large. */
274 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
275 expandRecord(shadow);
276 race = fullRaceCheckWrite(thread, location, shadow, currClock);
283 /* Check for datarace against last read. */
285 modelclock_t readClock = READVECTOR(shadowval);
286 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
288 if (clock_may_race(currClock, thread, readClock, readThread)) {
289 /* We have a datarace */
290 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
296 /* Check for datarace against last write. */
298 modelclock_t writeClock = WRITEVECTOR(shadowval);
299 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
301 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
302 /* We have a datarace */
303 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
309 *shadow = ENCODEOP(0, 0, threadid, ourClock);
314 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
315 if (raceset->add(race))
317 else model_free(race);
321 /** This function does race detection for a write on an expanded record. */
322 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
323 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
324 record->numReads = 0;
325 record->writeThread = thread;
326 modelclock_t ourClock = currClock->getClock(thread);
327 record->writeClock = ourClock;
328 record->isAtomic = 1;
331 /** This function just updates metadata on atomic write. */
332 void recordWrite(thread_id_t thread, void *location) {
333 uint64_t *shadow = lookupAddressEntry(location);
334 uint64_t shadowval = *shadow;
335 ClockVector *currClock = get_execution()->get_cv(thread);
337 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
338 fullRecordWrite(thread, location, shadow, currClock);
342 int threadid = id_to_int(thread);
343 modelclock_t ourClock = currClock->getClock(thread);
345 /* Thread ID is too large or clock is too large. */
346 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
347 expandRecord(shadow);
348 fullRecordWrite(thread, location, shadow, currClock);
352 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
357 /** This function does race detection on a read for an expanded record. */
358 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
360 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
361 struct DataRace * race = NULL;
362 /* Check for datarace against last write. */
364 modelclock_t writeClock = record->writeClock;
365 thread_id_t writeThread = record->writeThread;
367 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
368 /* We have a datarace */
369 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
372 /* Shorten vector when possible */
376 for (int i = 0;i < record->numReads;i++) {
377 modelclock_t readClock = record->readClock[i];
378 thread_id_t readThread = record->thread[i];
380 /* Note that is not really a datarace check as reads cannot
381 actually race. It is just determining that this read subsumes
382 another in the sense that either this read races or neither
383 read races. Note that readClock can't actually be zero, so it
384 could be optimized. */
386 if (clock_may_race(currClock, thread, readClock, readThread)) {
387 /* Still need this read in vector */
388 if (copytoindex != i) {
389 ASSERT(record->thread[i] >= 0);
390 record->readClock[copytoindex] = record->readClock[i];
391 record->thread[copytoindex] = record->thread[i];
397 if (__builtin_popcount(copytoindex) <= 1) {
398 if (copytoindex == 0) {
399 int newCapacity = INITCAPACITY;
400 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
401 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
402 } else if (copytoindex>=INITCAPACITY) {
403 int newCapacity = copytoindex * 2;
404 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
405 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
406 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
407 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
408 snapshot_free(record->readClock);
409 snapshot_free(record->thread);
410 record->readClock = newreadClock;
411 record->thread = newthread;
415 modelclock_t ourClock = currClock->getClock(thread);
418 record->thread[copytoindex] = thread;
419 record->readClock[copytoindex] = ourClock;
420 record->numReads = copytoindex + 1;
424 /** This function does race detection on a read. */
425 void raceCheckRead(thread_id_t thread, const void *location)
427 uint64_t *shadow = lookupAddressEntry(location);
428 uint64_t shadowval = *shadow;
429 ClockVector *currClock = get_execution()->get_cv(thread);
430 if (currClock == NULL)
433 struct DataRace * race = NULL;
436 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
437 race = fullRaceCheckRead(thread, location, shadow, currClock);
442 int threadid = id_to_int(thread);
443 modelclock_t ourClock = currClock->getClock(thread);
445 /* Thread ID is too large or clock is too large. */
446 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
447 expandRecord(shadow);
448 race = fullRaceCheckRead(thread, location, shadow, currClock);
452 /* Check for datarace against last write. */
454 modelclock_t writeClock = WRITEVECTOR(shadowval);
455 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
457 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
458 /* We have a datarace */
459 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
465 modelclock_t readClock = READVECTOR(shadowval);
466 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
468 if (clock_may_race(currClock, thread, readClock, readThread)) {
469 /* We don't subsume this read... Have to expand record. */
470 expandRecord(shadow);
471 fullRaceCheckRead(thread, location, shadow, currClock);
476 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
480 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
481 if (raceset->add(race))
483 else model_free(race);