3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
/* Root of the multi-level shadow table mapping addresses to race metadata. */
12 static struct ShadowTable *root;
/* Races observed but not yet confirmed feasible; drained by checkDataRaces(). */
13 SnapVector<struct DataRace *> unrealizedraces;
/* Bump-pointer arena [memory_base, memory_top) carved up by table_calloc(). */
14 static void *memory_base;
15 static void *memory_top;
/** Convenience wrapper returning the model-checker's current execution. */
17 static const ModelExecution * get_execution()
19 return model->get_execution();
/** Initializes the data race detector: allocates the zeroed root shadow
 *  table and reserves a pre-zeroed arena of SHADOWBASETABLES base tables
 *  for table_calloc() to carve up. */
23 void initRaceDetector()
25 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
26 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
27 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 void * table_calloc(size_t size)
32 if ((((char *)memory_base) + size) > memory_top) {
33 return snapshot_calloc(size, 1);
35 void *tmp = memory_base;
36 memory_base = ((char *)memory_base) + size;
/** Looks up — creating intermediate tables on demand via table_calloc() —
 *  the shadow-table entry corresponding to an address. The table is a radix
 *  trie indexed by 16-bit address chunks: the root is keyed by bits 47..32,
 *  the second level by bits 31..16, and the base table by bits 15..0
 *  (MASK16BIT is presumably 0xffff — TODO confirm). Address bits above 47
 *  are discarded, i.e. a 48-bit address space is assumed.
 *  @param address Address whose shadow word is wanted
 *  @return Pointer to the 64-bit shadow word tracking @a address */
43 static uint64_t * lookupAddressEntry(const void *address)
45 struct ShadowTable *currtable = root;
47 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
48 if (currtable == NULL) {
/* First touch of this 4GB region: allocate its second-level table. */
49 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
53 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
54 if (basetable == NULL) {
/* First touch of this 64KB region: allocate its base table. */
55 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
57 return &basetable->array[((uintptr_t)address) & MASK16BIT];
61 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
62 * to check the potential for a data race.
63 * @param clock1 The current clock vector
64 * @param tid1 The current thread; paired with clock1
65 * @param clock2 The clock value for the potentially-racing action
66 * @param tid2 The thread ID for the potentially-racing action
67 * @return true if the current clock allows a race with the event at clock2/tid2
69 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
70 modelclock_t clock2, thread_id_t tid2)
72 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
/**
 * Expands a record from the compact (single shadow word) form to the full
 * RaceRecord form. This is necessary for multiple readers or for very large
 * thread ids or clock values.
 * @param shadow Shadow word to expand; on return it stores the RaceRecord*.
 */
79 static void expandRecord(uint64_t *shadow)
81 uint64_t shadowval = *shadow;
/* Decode the packed read and write thread/clock pairs from the short form. */
83 modelclock_t readClock = READVECTOR(shadowval);
84 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
85 modelclock_t writeClock = WRITEVECTOR(shadowval);
86 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
/* Fresh zeroed record; carry over the last write. */
88 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
89 record->writeThread = writeThread;
90 record->writeClock = writeClock;
/* Seed the read vector with the decoded read.
 * NOTE(review): no assignment to record->numReads is visible here —
 * snapshot_calloc leaves it 0, which would make the seeded read invisible
 * to fullRaceCheck{Read,Write}. Confirm a "record->numReads = 1" (likely
 * guarded by readClock != 0) belongs in this function. */
93 record->capacity = INITCAPACITY;
94 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * record->capacity);
95 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * record->capacity);
97 record->thread[0] = readThread;
98 record->readClock[0] = readClock;
/* Replace the short encoding with the pointer to the expanded record. */
100 *shadow = (uint64_t) record;
/** Called when a potential data race is detected: snapshots the details into
 *  a DataRace entry on the unrealizedraces list and, if the race is already
 *  realized in a feasible prefix, yields control back to the model-checker
 *  so the execution can be terminated.
 *  @param oldthread Thread of the previously recorded conflicting access
 *  @param oldclock Clock of that earlier access
 *  @param isoldwrite True if the earlier access was a write
 *  @param newaction The action performing the current access
 *  @param isnewwrite True if the current access is a write
 *  @param address The racing memory location */
104 static void reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
106 struct DataRace *race = (struct DataRace *)snapshot_malloc(sizeof(struct DataRace));
107 race->oldthread = oldthread;
108 race->oldclock = oldclock;
109 race->isoldwrite = isoldwrite;
110 race->newaction = newaction;
111 race->isnewwrite = isnewwrite;
112 race->address = address;
113 unrealizedraces.push_back(race);
/* If the race is realized, bail out now. */
116 if (checkDataRaces())
117 model->switch_to_master(NULL);
/**
 * @brief Check and report data races
 *
 * If the trace is feasible (a feasible prefix), clear out the list of
 * unrealized data races, asserting any realized ones as execution bugs so that
 * the model-checker will end the execution.
 *
 * @return True if any data races were realized
 */
129 bool checkDataRaces()
131 if (get_execution()->isfeasibleprefix()) {
132 bool race_asserted = false;
/* Prune the non-racing unrealized dataraces */
134 for (unsigned i = 0; i < unrealizedraces.size(); i++) {
135 struct DataRace *race = unrealizedraces[i];
136 if (clock_may_race(race->newaction->get_cv(), race->newaction->get_tid(), race->oldclock, race->oldthread)) {
/* NOTE(review): the assert_race(race) call expected inside this branch is
 * not visible in this chunk — confirm it precedes setting the flag. */
138 race_asserted = true;
/* NOTE(review): per-entry cleanup (freeing each race) is also not visible
 * here — confirm the entries are released before the list is cleared. */
142 unrealizedraces.clear();
143 return race_asserted;
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list.
 *
 * NOTE(review): the call consuming this format string (presumably
 * model->assert_bug(...)) is not visible in this chunk — confirm the
 * argument list matches the %p/%s/%d/%u conversions below, including the
 * address and the old access's clock value.
 *
 * @param race The race to report
 */
156 void assert_race(struct DataRace *race)
159 "Data race detected @ address %p:\n"
160 "    Access 1: %5s in thread %2d @ clock %3u\n"
161 "    Access 2: %5s in thread %2d @ clock %3u",
163 race->isoldwrite ? "write" : "read",
164 id_to_int(race->oldthread),
166 race->isnewwrite ? "write" : "read",
167 id_to_int(race->newaction->get_tid()),
168 race->newaction->get_seq_number()
172 /** This function does race detection for a write on an expanded record. */
173 void fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
175 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
177 /* Check for datarace against last read. */
179 for (int i = 0; i < record->numReads; i++) {
180 modelclock_t readClock = record->readClock[i];
181 thread_id_t readThread = record->thread[i];
183 /* Note that readClock can't actuall be zero here, so it could be
186 if (clock_may_race(currClock, thread, readClock, readThread)) {
187 /* We have a datarace */
188 reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
192 /* Check for datarace against last write. */
194 modelclock_t writeClock = record->writeClock;
195 thread_id_t writeThread = record->writeThread;
197 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
198 /* We have a datarace */
199 reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
202 record->numReads = 0;
203 record->writeThread = thread;
204 modelclock_t ourClock = currClock->getClock(thread);
205 record->writeClock = ourClock;
208 /** This function does race detection on a write. */
209 void raceCheckWrite(thread_id_t thread, void *location)
211 uint64_t *shadow = lookupAddressEntry(location);
212 uint64_t shadowval = *shadow;
213 ClockVector *currClock = get_execution()->get_cv(thread);
216 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
217 fullRaceCheckWrite(thread, location, shadow, currClock);
221 int threadid = id_to_int(thread);
222 modelclock_t ourClock = currClock->getClock(thread);
224 /* Thread ID is too large or clock is too large. */
225 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
226 expandRecord(shadow);
227 fullRaceCheckWrite(thread, location, shadow, currClock);
231 /* Check for datarace against last read. */
233 modelclock_t readClock = READVECTOR(shadowval);
234 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
236 if (clock_may_race(currClock, thread, readClock, readThread)) {
237 /* We have a datarace */
238 reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
241 /* Check for datarace against last write. */
243 modelclock_t writeClock = WRITEVECTOR(shadowval);
244 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
246 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
247 /* We have a datarace */
248 reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
250 *shadow = ENCODEOP(0, 0, threadid, ourClock);
/** Performs read race detection against an expanded (full) race record:
 *  checks this read against the last recorded write, compacts the read
 *  vector by dropping reads this read subsumes, grows the vector when full,
 *  and finally appends this read.
 *  NOTE(review): "copytoindex" has no visible declaration or increment in
 *  this chunk — confirm it is initialized to 0 before the pruning loop and
 *  incremented inside the clock_may_race branch.
 *  @param thread The thread performing the read
 *  @param location The address being read
 *  @param shadow Shadow word holding the RaceRecord pointer
 *  @param currClock Clock vector of @a thread */
254 void fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
256 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
/* Check for datarace against last write. */
260 modelclock_t writeClock = record->writeClock;
261 thread_id_t writeThread = record->writeThread;
263 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
265 reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
/* Shorten vector when possible */
272 for (int i = 0; i < record->numReads; i++) {
273 modelclock_t readClock = record->readClock[i];
274 thread_id_t readThread = record->thread[i];
/* Note this is not really a datarace check, as reads cannot actually
   race with each other. It determines whether this read subsumes the
   recorded one, in the sense that either this read races or neither
   read races. Note that readClock can't actually be zero, so this
   could be optimized. */
282 if (clock_may_race(currClock, thread, readClock, readThread)) {
/* Still need this read in vector */
284 if (copytoindex != i) {
285 record->readClock[copytoindex] = record->readClock[i];
286 record->thread[copytoindex] = record->thread[i];
/* Grow the read vector (doubling) when the surviving reads fill it. */
292 if (copytoindex >= record->capacity) {
293 int newCapacity = record->capacity * 2;
294 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
295 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
296 std::memcpy(newthread, record->thread, record->capacity * sizeof(thread_id_t));
297 std::memcpy(newreadClock, record->readClock, record->capacity * sizeof(modelclock_t));
298 snapshot_free(record->readClock);
299 snapshot_free(record->thread);
300 record->readClock = newreadClock;
301 record->thread = newthread;
302 record->capacity = newCapacity;
/* Append this read after the surviving (non-subsumed) reads. */
305 modelclock_t ourClock = currClock->getClock(thread);
307 record->thread[copytoindex] = thread;
308 record->readClock[copytoindex] = ourClock;
309 record->numReads = copytoindex + 1;
312 /** This function does race detection on a read. */
313 void raceCheckRead(thread_id_t thread, const void *location)
315 uint64_t *shadow = lookupAddressEntry(location);
316 uint64_t shadowval = *shadow;
317 ClockVector *currClock = get_execution()->get_cv(thread);
320 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
321 fullRaceCheckRead(thread, location, shadow, currClock);
325 int threadid = id_to_int(thread);
326 modelclock_t ourClock = currClock->getClock(thread);
328 /* Thread ID is too large or clock is too large. */
329 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
330 expandRecord(shadow);
331 fullRaceCheckRead(thread, location, shadow, currClock);
335 /* Check for datarace against last write. */
337 modelclock_t writeClock = WRITEVECTOR(shadowval);
338 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
340 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
341 /* We have a datarace */
342 reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
345 modelclock_t readClock = READVECTOR(shadowval);
346 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
348 if (clock_may_race(currClock, thread, readClock, readThread)) {
349 /* We don't subsume this read... Have to expand record. */
350 expandRecord(shadow);
351 fullRaceCheckRead(thread, location, shadow, currClock);
355 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock);