fix memory leak
[c11tester.git] / datarace.cc
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
/** Convenience accessor: returns the model-checker's current execution
 *  object (used throughout this file for clock vectors and parent actions). */
static const ModelExecution * get_execution()
{
	return model->get_execution();
}
23
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
26 {
27         root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28         memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29         memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30         raceset = new RaceSet();
31 }
32
33 void * table_calloc(size_t size)
34 {
35         if ((((char *)memory_base) + size) > memory_top) {
36                 return snapshot_calloc(size, 1);
37         } else {
38                 void *tmp = memory_base;
39                 memory_base = ((char *)memory_base) + size;
40                 return tmp;
41         }
42 }
43
/** This function looks up the entry in the shadow table corresponding to a
 * given address.*/
// Walks a (one- or two-level, depending on BIT48) 16-bit-indexed trie keyed
// on the address bits, lazily allocating interior/base tables on first touch.
// Returns a pointer to the 64-bit shadow word for the byte at `address`.
static uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	// 48-bit address spaces need one extra level indexed by bits 32..47.
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		// First access in this 4GB region: allocate and link the mid-level table.
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	// Bits 16..31 select the base table; bits 0..15 select the word within it.
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
62
63
64 bool hasNonAtomicStore(const void *address) {
65         uint64_t * shadow = lookupAddressEntry(address);
66         uint64_t shadowval = *shadow;
67         if (ISSHORTRECORD(shadowval)) {
68                 //Do we have a non atomic write with a non-zero clock
69                 return !(ATOMICMASK & shadowval);
70         } else {
71                 if (shadowval == 0)
72                         return true;
73                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74                 return !record->isAtomic;
75         }
76 }
77
78 void setAtomicStoreFlag(const void *address) {
79         uint64_t * shadow = lookupAddressEntry(address);
80         uint64_t shadowval = *shadow;
81         if (ISSHORTRECORD(shadowval)) {
82                 *shadow = shadowval | ATOMICMASK;
83         } else {
84                 if (shadowval == 0) {
85                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
86                         return;
87                 }
88                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
89                 record->isAtomic = 1;
90         }
91 }
92
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94         uint64_t * shadow = lookupAddressEntry(address);
95         uint64_t shadowval = *shadow;
96         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97                 //Do we have a non atomic write with a non-zero clock
98                 *thread = WRTHREADID(shadowval);
99                 *clock = WRITEVECTOR(shadowval);
100         } else {
101                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102                 *thread = record->writeThread;
103                 *clock = record->writeClock;
104         }
105 }
106
107 /**
108  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109  * to check the potential for a data race.
110  * @param clock1 The current clock vector
111  * @param tid1 The current thread; paired with clock1
112  * @param clock2 The clock value for the potentially-racing action
113  * @param tid2 The thread ID for the potentially-racing action
114  * @return true if the current clock allows a race with the event at clock2/tid2
115  */
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117                                                                                                          modelclock_t clock2, thread_id_t tid2)
118 {
119         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
120 }
121
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps. */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	// Unpack the four fields of the short record before overwriting it.
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	if (readClock != 0) {
		// The short record held one read; carry it over as read #0.
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	} else {
		// No reads yet; arrays are allocated lazily by fullRaceCheckRead.
		record->thread = NULL;
	}
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	// Replace the packed word with a pointer to the expanded record.
	*shadow = (uint64_t) record;
}
153
154 #define FIRST_STACK_FRAME 2
155
156 unsigned int race_hash(struct DataRace *race) {
157         unsigned int hash = 0;
158         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
159                 hash ^= ((uintptr_t)race->backtrace[i]);
160                 hash = (hash >> 3) | (hash << 29);
161         }
162         return hash;
163 }
164
165 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
166         if (r1->numframes != r2->numframes)
167                 return false;
168         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
169                 if (r1->backtrace[i] != r2->backtrace[i])
170                         return false;
171         }
172         return true;
173 }
174
/** This function is called when we detect a data race.*/
// Allocates and fills a DataRace describing the two conflicting accesses.
// Ownership transfers to the caller, who either hands it to the race set
// (which keeps it) or frees it with model_free() if it is a duplicate.
// Note: backtrace/numframes are filled in later by the caller.
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
	return race;
}
187
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	// NOTE(review): Access 2's "clock" column actually prints
	// newaction->get_seq_number() — presumably seq number == clock here;
	// confirm against ModelAction's semantics.
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
212
/** This function does race detection for a write on an expanded record. */
// Checks the new write against every recorded read and the last write;
// reports at most one race. Regardless of outcome, the record is then
// reset so this write becomes the (non-atomic) last write and all prior
// reads are discarded.
// @return a newly allocated DataRace (caller owns it), or NULL.
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actuall be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */
	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	// A write supersedes all earlier accesses: clear reads and record
	// this thread/clock as the new last (non-atomic) write.
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
254
/** This function does race detection on a write. */
// Fast path operates on the packed short record; falls back to the
// expanded-record path when the record is already expanded or the
// thread id / clock no longer fit in the packed encoding.
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	// thread has no clock vector yet; nothing to compare against

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		// Fallthrough from the write check above is intentional: with or
		// without a race, the short record is rewritten so this write
		// (non-atomic: no ATOMICMASK) becomes the last access.
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		// Only report a race the first time its backtrace is seen; duplicates
		// are freed here (this is the memory-leak fix).
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
318
/** This function does race detection for a write on an expanded record. */
// Variant of fullRaceCheckWrite for an *atomic* write: if the previous
// access was atomic too, no race is possible and the checks are skipped.
// The record is always reset so this write becomes the last (atomic) write.
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	// Atomic-vs-atomic accesses never race.
	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actuall be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	// Reset: discard reads, mark this thread/clock as the last write,
	// flagged atomic (unlike fullRaceCheckWrite, which clears the flag).
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
364
/** This function does race detection on a write. */
// Atomic-write variant of raceCheckWrite: an existing atomic short record
// cannot race, and the rewritten short record keeps the ATOMICMASK bit set.
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	// thread has no clock vector yet

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		// Rewrite the short record: this atomic write becomes the last access.
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		// Deduplicate by backtrace; free duplicates to avoid leaking.
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
432
433 /** This function does race detection for a write on an expanded record. */
434 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
435         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
436         record->numReads = 0;
437         record->writeThread = thread;
438         modelclock_t ourClock = currClock->getClock(thread);
439         record->writeClock = ourClock;
440         record->isAtomic = 1;
441 }
442
443 /** This function does race detection for a write on an expanded record. */
444 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
445         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
446         record->numReads = 0;
447         record->writeThread = thread;
448         modelclock_t ourClock = currClock->getClock(thread);
449         record->writeClock = ourClock;
450         record->isAtomic = 0;
451 }
452
453 /** This function just updates metadata on atomic write. */
454 void recordWrite(thread_id_t thread, void *location) {
455         uint64_t *shadow = lookupAddressEntry(location);
456         uint64_t shadowval = *shadow;
457         ClockVector *currClock = get_execution()->get_cv(thread);
458         /* Do full record */
459         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
460                 fullRecordWrite(thread, location, shadow, currClock);
461                 return;
462         }
463
464         int threadid = id_to_int(thread);
465         modelclock_t ourClock = currClock->getClock(thread);
466
467         /* Thread ID is too large or clock is too large. */
468         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
469                 expandRecord(shadow);
470                 fullRecordWrite(thread, location, shadow, currClock);
471                 return;
472         }
473
474         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
475 }
476
/** This function just updates metadata on atomic write. */
// Records a zero-initializing (calloc-style) write for each of `size` bytes
// starting at `location`, attributing them to the current thread as
// non-atomic writes.  No race detection is performed.
// NOTE(review): the early `return`s abandon the remaining bytes as soon as
// one byte needs the expanded-record path — presumably intentional
// (later bytes keep their old metadata); confirm against callers.
void recordCalloc(void *location, size_t size) {
	thread_id_t thread = thread_current()->get_id();
	for(;size != 0;size--) {
		uint64_t *shadow = lookupAddressEntry(location);
		uint64_t shadowval = *shadow;
		ClockVector *currClock = get_execution()->get_cv(thread);
		/* Do full record */
		if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
			return;
		}

		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
			return;
		}

		// Non-atomic write: no ATOMICMASK bit.
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
		location = (void *)(((char *) location) + 1);	// advance to next byte
	}
}
504
/** This function does race detection on a read for an expanded record. */
// Checks the read against the last write, compacts the read vector by
// dropping reads this one subsumes, grows the vector when needed, and
// appends this read.  Returns a DataRace (caller owns it) or NULL.
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
			}
			copytoindex++;
		}
	}

	// Capacity management: popcount(copytoindex) <= 1 means copytoindex is 0
	// or a power of two — the only points where the doubling scheme can be
	// at capacity.  Since numReads grows by at most 1 per call, every power
	// of two is visited, so capacity always covers index copytoindex.
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0 && record->thread == NULL) {
			// First read on this record: allocate the initial arrays.
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			// At a power-of-two boundary: double the arrays and copy over.
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	modelclock_t ourClock = currClock->getClock(thread);

	// Append this read after the surviving (non-subsumed) reads.
	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
571
/** This function does race detection on a read. */
// Fast path operates on the packed short record; expands the record when
// the encoding overflows or when the prior recorded read is not subsumed
// by this one (a short record can hold only a single read).
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	// thread has no clock vector yet

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

		// The goto above and natural fallthrough both land here: even after a
		// write race, the prior read must still be checked for subsumption.
ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		// This read subsumes the stored one: overwrite the read fields,
		// keep the write fields and atomic bit.
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		// Deduplicate by backtrace; free duplicates to avoid leaking.
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
634
635
636 /** This function does race detection on a read for an expanded record. */
637 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
638 {
639         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
640         struct DataRace * race = NULL;
641         /* Check for datarace against last write. */
642         if (record->isAtomic)
643                 return NULL;
644
645         modelclock_t writeClock = record->writeClock;
646         thread_id_t writeThread = record->writeThread;
647
648         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
649                 /* We have a datarace */
650                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
651         }
652         return race;
653 }
654
/** This function does race detection on a read. */
// Atomic-read variant: only the last write is checked (atomic reads cannot
// race with other reads), and an atomic last write cannot race at all.
// The shadow record is left unmodified.
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	// thread has no clock vector yet

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	// Short record with the atomic bit set: previous write was atomic, no race.
	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}


	}
Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		// Deduplicate by backtrace; free duplicates to avoid leaking.
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
696
// Race-checks the first byte of a multi-byte read, and additionally reports
// the old/new packed shadow values through out-parameters so subsequent
// bytes (raceCheckRead_otherIt) can take a cheaper path when their shadow
// word matches.  Returns the shadow-word pointer for the checked address.
// NOTE(review): *old_val/*new_val are only assigned on the short-record
// fast path — callers presumably must pre-initialize them; confirm.
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;	// thread has no clock vector yet

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			// clock_may_race() returned true, so readClock != 0 and
			// expandRecord stored that read at index 0 with numReads == 1;
			// this read is appended directly at index 1.
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		// Subsumed the stored read: rewrite the short record, preserving the
		// write fields and atomic bit, and report old/new values to the caller.
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

		*old_val = shadowval;
		*new_val = *shadow;
	}
Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		// Deduplicate by backtrace; free duplicates to avoid leaking.
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}

	return shadow;
}
763
/**
 * Race-check a one-byte read for the 2nd..Nth byte of a multi-byte access
 * (the "other iterations").  Same state machine as raceCheckRead_firstIt,
 * but the shadow transition is not exported to the caller.
 *
 * @param thread   the reading thread
 * @param location the address being read
 */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to compare against */

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Fold this read into the short-form record, preserving the atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);	/* duplicate of an already-recorded race: free it to avoid leaking */
	}
}
825
826 void raceCheckRead64(thread_id_t thread, const void *location)
827 {
828         uint64_t old_shadowval, new_shadowval;
829         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
830
831         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
832         if (CHECKBOUNDARY(location, 7)) {
833                 if (shadow[1]==old_shadowval)
834                         shadow[1] = new_shadowval;
835                 else goto L1;
836                 if (shadow[2]==old_shadowval)
837                         shadow[2] = new_shadowval;
838                 else goto L2;
839                 if (shadow[3]==old_shadowval)
840                         shadow[3] = new_shadowval;
841                 else goto L3;
842                 if (shadow[4]==old_shadowval)
843                         shadow[4] = new_shadowval;
844                 else goto L4;
845                 if (shadow[5]==old_shadowval)
846                         shadow[5] = new_shadowval;
847                 else goto L5;
848                 if (shadow[6]==old_shadowval)
849                         shadow[6] = new_shadowval;
850                 else goto L6;
851                 if (shadow[7]==old_shadowval)
852                         shadow[7] = new_shadowval;
853                 else goto L7;
854                 return;
855         }
856
857 L1:
858         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
859 L2:
860         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
861 L3:
862         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
863 L4:
864         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
865 L5:
866         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
867 L6:
868         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
869 L7:
870         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
871 }
872
873 void raceCheckRead32(thread_id_t thread, const void *location)
874 {
875         uint64_t old_shadowval, new_shadowval;
876         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
877
878         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
879         if (CHECKBOUNDARY(location, 3)) {
880                 if (shadow[1]==old_shadowval)
881                         shadow[1] = new_shadowval;
882                 else goto L1;
883                 if (shadow[2]==old_shadowval)
884                         shadow[2] = new_shadowval;
885                 else goto L2;
886                 if (shadow[3]==old_shadowval)
887                         shadow[3] = new_shadowval;
888                 else goto L3;
889                 return;
890         }
891
892 L1:
893         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
894 L2:
895         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
896 L3:
897         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
898 }
899
900 void raceCheckRead16(thread_id_t thread, const void *location)
901 {
902         uint64_t old_shadowval, new_shadowval;
903         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
904
905
906         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
907         if (CHECKBOUNDARY(location, 1)) {
908                 if (shadow[1]==old_shadowval) {
909                         shadow[1] = new_shadowval;
910                         return;
911                 }
912         }
913         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
914 }
915
916 void raceCheckRead8(thread_id_t thread, const void *location)
917 {
918         uint64_t old_shadowval, new_shadowval;
919         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
920
921         raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
922 }
923
/**
 * Race-check a one-byte write and update its shadow-memory entry.
 * "First iteration" of a multi-byte write: exports the before/after shadow
 * words through old_val/new_val so callers (raceCheckWrite64/32/16) can
 * bulk-apply the same transition to adjacent bytes with identical shadow
 * words.  Unlike the read case, the short-form record is rewritten (and
 * old_val/new_val set) even when a race is detected, via ShadowExit; only
 * the full-record paths skip them.
 *
 * @param thread   the writing thread
 * @param location the address being written
 * @param old_val  out: shadow word before the update (short-record path only)
 * @param new_val  out: shadow word after the update (short-record path only)
 * @return pointer to the shadow-table entry for location
 */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;	/* thread has no clock vector yet; nothing to compare against */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* A write clears the read vector and records us as the last writer. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);

		*old_val = shadowval;
		*new_val = *shadow;
	}

Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);	/* duplicate of an already-recorded race: free it to avoid leaking */
	}

	return shadow;
}
991
/**
 * Race-check a one-byte write for the 2nd..Nth byte of a multi-byte access
 * (the "other iterations").  Same state machine as raceCheckWrite_firstIt,
 * but the shadow transition is not exported to the caller.
 *
 * @param thread   the writing thread
 * @param location the address being written
 */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* thread has no clock vector yet; nothing to compare against */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* A write clears the read vector and records us as the last writer. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);	/* duplicate of an already-recorded race: free it to avoid leaking */
	}
}
1055
1056 void raceCheckWrite64(thread_id_t thread, const void *location)
1057 {
1058         uint64_t old_shadowval, new_shadowval;
1059         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1060
1061         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1062         if (CHECKBOUNDARY(location, 7)) {
1063                 if (shadow[1]==old_shadowval)
1064                         shadow[1] = new_shadowval;
1065                 else goto L1;
1066                 if (shadow[2]==old_shadowval)
1067                         shadow[2] = new_shadowval;
1068                 else goto L2;
1069                 if (shadow[3]==old_shadowval)
1070                         shadow[3] = new_shadowval;
1071                 else goto L3;
1072                 if (shadow[4]==old_shadowval)
1073                         shadow[4] = new_shadowval;
1074                 else goto L4;
1075                 if (shadow[5]==old_shadowval)
1076                         shadow[5] = new_shadowval;
1077                 else goto L5;
1078                 if (shadow[6]==old_shadowval)
1079                         shadow[6] = new_shadowval;
1080                 else goto L6;
1081                 if (shadow[7]==old_shadowval)
1082                         shadow[7] = new_shadowval;
1083                 else goto L7;
1084                 return;
1085         }
1086
1087 L1:
1088         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1089 L2:
1090         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1091 L3:
1092         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1093 L4:
1094         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1095 L5:
1096         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1097 L6:
1098         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1099 L7:
1100         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1101 }
1102
1103 void raceCheckWrite32(thread_id_t thread, const void *location)
1104 {
1105         uint64_t old_shadowval, new_shadowval;
1106         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1107
1108         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1109         if (CHECKBOUNDARY(location, 3)) {
1110                 if (shadow[1]==old_shadowval)
1111                         shadow[1] = new_shadowval;
1112                 else goto L1;
1113                 if (shadow[2]==old_shadowval)
1114                         shadow[2] = new_shadowval;
1115                 else goto L2;
1116                 if (shadow[3]==old_shadowval)
1117                         shadow[3] = new_shadowval;
1118                 else goto L3;
1119                 return;
1120         }
1121
1122 L1:
1123         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1124 L2:
1125         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1126 L3:
1127         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1128 }
1129
1130 void raceCheckWrite16(thread_id_t thread, const void *location)
1131 {
1132         uint64_t old_shadowval, new_shadowval;
1133         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1134
1135         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1136         if (CHECKBOUNDARY(location, 1)) {
1137                 if (shadow[1]==old_shadowval) {
1138                         shadow[1] = new_shadowval;
1139                         return;
1140                 }
1141         }
1142         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1143 }
1144
1145 void raceCheckWrite8(thread_id_t thread, const void *location)
1146 {
1147         uint64_t old_shadowval, new_shadowval;
1148         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1149
1150         raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1151 }