3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the lazily-populated multi-level shadow table mapping each
 * application address to its 64-bit race-detection shadow word. */
static struct ShadowTable *root;
/* Bump-pointer arena consumed linearly by table_calloc(); reserved once in
 * initRaceDetector() so most table nodes avoid individual allocations. */
static void *memory_base;
static void *memory_top;
/* Races reported so far; presumably deduplicated via race_hash()/race_equals()
 * — confirm against RaceSet's declaration. */
static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
65 * to check the potential for a data race.
66 * @param clock1 The current clock vector
67 * @param tid1 The current thread; paired with clock1
68 * @param clock2 The clock value for the potentially-racing action
69 * @param tid2 The thread ID for the potentially-racing action
70 * @return true if the current clock allows a race with the event at clock2/tid2
72 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
73 modelclock_t clock2, thread_id_t tid2)
75 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
79 * Expands a record from the compact form to the full form. This is
80 * necessary for multiple readers or for very large thread ids or time
82 static void expandRecord(uint64_t *shadow)
84 uint64_t shadowval = *shadow;
86 modelclock_t readClock = READVECTOR(shadowval);
87 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
88 modelclock_t writeClock = WRITEVECTOR(shadowval);
89 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
91 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
92 record->writeThread = writeThread;
93 record->writeClock = writeClock;
96 record->capacity = INITCAPACITY;
97 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * record->capacity);
98 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * record->capacity);
100 ASSERT(readThread >= 0);
101 record->thread[0] = readThread;
102 record->readClock[0] = readClock;
104 *shadow = (uint64_t) record;
107 #define FIRST_STACK_FRAME 2
109 unsigned int race_hash(struct DataRace *race) {
110 unsigned int hash = 0;
111 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
112 hash ^= ((uintptr_t)race->backtrace[i]);
113 hash = (hash >> 3) | (hash << 29);
119 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
120 if (r1->numframes != r2->numframes)
122 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
123 if (r1->backtrace[i] != r2->backtrace[i])
129 /** This function is called when we detect a data race.*/
130 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
132 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
133 race->oldthread = oldthread;
134 race->oldclock = oldclock;
135 race->isoldwrite = isoldwrite;
136 race->newaction = newaction;
137 race->isnewwrite = isnewwrite;
138 race->address = address;
143 * @brief Assert a data race
145 * Asserts a data race which is currently realized, causing the execution to
146 * end and stashing a message in the model-checker's bug list
148 * @param race The race to report
150 void assert_race(struct DataRace *race)
152 model_print("At location: \n");
153 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
154 model_print("Data race detected @ address %p:\n"
155 " Access 1: %5s in thread %2d @ clock %3u\n"
156 " Access 2: %5s in thread %2d @ clock %3u",
158 race->isoldwrite ? "write" : "read",
159 id_to_int(race->oldthread),
161 race->isnewwrite ? "write" : "read",
162 id_to_int(race->newaction->get_tid()),
163 race->newaction->get_seq_number()
167 /** This function does race detection for a write on an expanded record. */
168 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
170 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
171 struct DataRace * race = NULL;
173 /* Check for datarace against last read. */
175 for (int i = 0;i < record->numReads;i++) {
176 modelclock_t readClock = record->readClock[i];
177 thread_id_t readThread = record->thread[i];
179 /* Note that readClock can't actuall be zero here, so it could be
182 if (clock_may_race(currClock, thread, readClock, readThread)) {
183 /* We have a datarace */
184 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
189 /* Check for datarace against last write. */
192 modelclock_t writeClock = record->writeClock;
193 thread_id_t writeThread = record->writeThread;
195 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
196 /* We have a datarace */
197 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
202 record->numReads = 0;
203 record->writeThread = thread;
204 modelclock_t ourClock = currClock->getClock(thread);
205 record->writeClock = ourClock;
209 /** This function does race detection on a write. */
210 void raceCheckWrite(thread_id_t thread, void *location)
212 uint64_t *shadow = lookupAddressEntry(location);
213 uint64_t shadowval = *shadow;
214 ClockVector *currClock = get_execution()->get_cv(thread);
215 struct DataRace * race = NULL;
217 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
218 race = fullRaceCheckWrite(thread, location, shadow, currClock);
223 int threadid = id_to_int(thread);
224 modelclock_t ourClock = currClock->getClock(thread);
226 /* Thread ID is too large or clock is too large. */
227 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
228 expandRecord(shadow);
229 race = fullRaceCheckWrite(thread, location, shadow, currClock);
236 /* Check for datarace against last read. */
238 modelclock_t readClock = READVECTOR(shadowval);
239 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
241 if (clock_may_race(currClock, thread, readClock, readThread)) {
242 /* We have a datarace */
243 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
249 /* Check for datarace against last write. */
251 modelclock_t writeClock = WRITEVECTOR(shadowval);
252 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
254 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
255 /* We have a datarace */
256 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
262 *shadow = ENCODEOP(0, 0, threadid, ourClock);
267 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
268 if (raceset->add(race))
270 else model_free(race);
273 /** This function does race detection on a read for an expanded record. */
274 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
276 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
277 struct DataRace * race = NULL;
278 /* Check for datarace against last write. */
280 modelclock_t writeClock = record->writeClock;
281 thread_id_t writeThread = record->writeThread;
283 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
284 /* We have a datarace */
285 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
288 /* Shorten vector when possible */
292 for (int i = 0;i < record->numReads;i++) {
293 modelclock_t readClock = record->readClock[i];
294 thread_id_t readThread = record->thread[i];
296 /* Note that is not really a datarace check as reads cannot
297 actually race. It is just determining that this read subsumes
298 another in the sense that either this read races or neither
299 read races. Note that readClock can't actually be zero, so it
300 could be optimized. */
302 if (clock_may_race(currClock, thread, readClock, readThread)) {
303 /* Still need this read in vector */
304 if (copytoindex != i) {
305 ASSERT(record->thread[i] >= 0);
306 record->readClock[copytoindex] = record->readClock[i];
307 record->thread[copytoindex] = record->thread[i];
313 if (copytoindex >= record->capacity) {
314 if (record->capacity == 0) {
315 int newCapacity = INITCAPACITY;
316 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
317 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
318 record->capacity = newCapacity;
320 int newCapacity = record->capacity * 2;
321 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
322 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
323 std::memcpy(newthread, record->thread, record->capacity * sizeof(thread_id_t));
324 std::memcpy(newreadClock, record->readClock, record->capacity * sizeof(modelclock_t));
325 snapshot_free(record->readClock);
326 snapshot_free(record->thread);
327 record->readClock = newreadClock;
328 record->thread = newthread;
329 record->capacity = newCapacity;
333 modelclock_t ourClock = currClock->getClock(thread);
336 record->thread[copytoindex] = thread;
337 record->readClock[copytoindex] = ourClock;
338 record->numReads = copytoindex + 1;
342 /** This function does race detection on a read. */
343 void raceCheckRead(thread_id_t thread, const void *location)
345 uint64_t *shadow = lookupAddressEntry(location);
346 uint64_t shadowval = *shadow;
347 ClockVector *currClock = get_execution()->get_cv(thread);
348 struct DataRace * race = NULL;
351 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
352 race = fullRaceCheckRead(thread, location, shadow, currClock);
357 int threadid = id_to_int(thread);
358 modelclock_t ourClock = currClock->getClock(thread);
360 /* Thread ID is too large or clock is too large. */
361 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
362 expandRecord(shadow);
363 race = fullRaceCheckRead(thread, location, shadow, currClock);
367 /* Check for datarace against last write. */
369 modelclock_t writeClock = WRITEVECTOR(shadowval);
370 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
372 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
373 /* We have a datarace */
374 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
380 modelclock_t readClock = READVECTOR(shadowval);
381 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
383 if (clock_may_race(currClock, thread, readClock, readThread)) {
384 /* We don't subsume this read... Have to expand record. */
385 expandRecord(shadow);
386 fullRaceCheckRead(thread, location, shadow, currClock);
391 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock);
395 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
396 if (raceset->add(race))
398 else model_free(race);