Remove unused headers
[c11tester.git] / datarace.cc
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
/* Root of the (two- or, with BIT48, three-) level shadow table mapping
 * application addresses to 64-bit shadow words (see lookupAddressEntry). */
static struct ShadowTable *root;
/* Bump-allocator arena used by table_calloc for shadow tables. */
static void *memory_base;
static void *memory_top;	/* one past the end of the arena */
/* Set of data races reported so far, deduplicated by backtrace
 * (race_hash / race_equals). */
static RaceSet * raceset;

#ifdef COLLECT_STAT
/* Counters of instrumented stores/loads, bucketed by access width in bits. */
static unsigned int store8_count = 0;
static unsigned int store16_count = 0;
static unsigned int store32_count = 0;
static unsigned int store64_count = 0;

static unsigned int load8_count = 0;
static unsigned int load16_count = 0;
static unsigned int load32_count = 0;
static unsigned int load64_count = 0;
#endif
30
/** Convenience accessor for the model checker's current execution object. */
static const ModelExecution * get_execution()
{
	return model->get_execution();
}
35
36 /** This function initialized the data race detector. */
37 void initRaceDetector()
38 {
39         root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
40         memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
41         memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
42         raceset = new RaceSet();
43 }
44
45 void * table_calloc(size_t size)
46 {
47         if ((((char *)memory_base) + size) > memory_top) {
48                 return snapshot_calloc(size, 1);
49         } else {
50                 void *tmp = memory_base;
51                 memory_base = ((char *)memory_base) + size;
52                 return tmp;
53         }
54 }
55
/** This function looks up the entry in the shadow table corresponding to a
 * given address.  Intermediate tables are allocated lazily on first touch
 * via table_calloc.  Returns a pointer to the 64-bit shadow word for the
 * byte at `address`. */
static inline uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	/* 48-bit address spaces: an extra level keyed on address bits 32-47. */
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	/* Bits 16-31 select the base table holding the shadow words. */
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	/* The low 16 bits index the word within the base table. */
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
74
75
76 bool hasNonAtomicStore(const void *address) {
77         uint64_t * shadow = lookupAddressEntry(address);
78         uint64_t shadowval = *shadow;
79         if (ISSHORTRECORD(shadowval)) {
80                 //Do we have a non atomic write with a non-zero clock
81                 return !(ATOMICMASK & shadowval);
82         } else {
83                 if (shadowval == 0)
84                         return true;
85                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
86                 return !record->isAtomic;
87         }
88 }
89
90 void setAtomicStoreFlag(const void *address) {
91         uint64_t * shadow = lookupAddressEntry(address);
92         uint64_t shadowval = *shadow;
93         if (ISSHORTRECORD(shadowval)) {
94                 *shadow = shadowval | ATOMICMASK;
95         } else {
96                 if (shadowval == 0) {
97                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
98                         return;
99                 }
100                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
101                 record->isAtomic = 1;
102         }
103 }
104
105 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
106         uint64_t * shadow = lookupAddressEntry(address);
107         uint64_t shadowval = *shadow;
108         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
109                 //Do we have a non atomic write with a non-zero clock
110                 *thread = WRTHREADID(shadowval);
111                 *clock = WRITEVECTOR(shadowval);
112         } else {
113                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
114                 *thread = record->writeThread;
115                 *clock = record->writeClock;
116         }
117 }
118
119 /**
120  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
121  * to check the potential for a data race.
122  * @param clock1 The current clock vector
123  * @param tid1 The current thread; paired with clock1
124  * @param clock2 The clock value for the potentially-racing action
125  * @param tid2 The thread ID for the potentially-racing action
126  * @return true if the current clock allows a race with the event at clock2/tid2
127  */
128 static inline bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
129                                                                                                          modelclock_t clock2, thread_id_t tid2)
130 {
131         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
132 }
133
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps.
 *
 * Decodes the packed reader/writer thread+clock fields from the 64-bit
 * shadow word into a freshly allocated RaceRecord and stores the record's
 * address back into the shadow word.  A read clock of zero means "no
 * prior read": no read vectors are allocated and record->thread stays
 * NULL (fullRaceCheckRead allocates them on demand).
 */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	if (readClock != 0) {
		/* Seed the read vectors with the single reader from the compact word. */
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	} else {
		record->thread = NULL;
	}
	/* Carry the atomic flag over from the compact encoding. */
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	*shadow = (uint64_t) record;
}
165
166 #define FIRST_STACK_FRAME 2
167
168 unsigned int race_hash(struct DataRace *race) {
169         unsigned int hash = 0;
170         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
171                 hash ^= ((uintptr_t)race->backtrace[i]);
172                 hash = (hash >> 3) | (hash << 29);
173         }
174         return hash;
175 }
176
177 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
178         if (r1->numframes != r2->numframes)
179                 return false;
180         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
181                 if (r1->backtrace[i] != r2->backtrace[i])
182                         return false;
183         }
184         return true;
185 }
186
/** This function is called when we detect a data race.  Allocates and
 *  fills a DataRace descriptor; the caller owns it and must either hand
 *  it to the race set or model_free it.  Returns NULL when race
 *  reporting is compiled out.  The backtrace fields are NOT filled in
 *  here — callers populate them before deduplication. */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
#ifdef REPORT_DATA_RACES
	struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
	race->oldthread = oldthread;
	race->oldclock = oldclock;
	race->isoldwrite = isoldwrite;
	race->newaction = newaction;
	race->isnewwrite = isnewwrite;
	race->address = address;
	return race;
#else
	return NULL;
#endif
}
203
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	/* Symbolize the captured frames directly to the model output fd. */
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	/* NOTE(review): the second access prints the new action's sequence
	 * number in the "clock" column — confirm that is intentional. */
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
228
/** This function does race detection for a write on an expanded record.
 *  At most one race is reported (first against the recorded reads, then
 *  against the last write).  In all cases the record is then reset so
 *  this write becomes the last access (non-atomic).  Caller owns any
 *  returned DataRace. */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */
	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* This write supersedes all prior accesses: drop the read set and
	 * record this thread as the last (non-atomic) writer. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
270
/** This function does race detection for an atomic write on an expanded
 *  record.  All checks are skipped when the previous access state is
 *  atomic; the record is still reset so this write becomes the last
 *  access, flagged atomic.  Caller owns any returned DataRace. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Atomic-on-atomic accesses cannot race. */
	if (record->isAtomic)
		goto Exit;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Drop the read set and record this thread as the last atomic writer. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 1;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
316
/** This function does race detection on an atomic write.  Dispatches to
 *  the expanded-record path when the shadow word holds a RaceRecord
 *  pointer, otherwise works on (or upgrades from) the compact encoding.
 *  Detected races are deduplicated through raceset before reporting. */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* no clock vector for this thread yet: nothing to compare */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Overwrite the compact word: this thread becomes the last writer,
		 * atomic bit set, read fields cleared. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Report each distinct backtrace only once. */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
388
/** Updates an expanded record for an atomic write: drops the read set and
 *  records this thread as the last writer with the atomic flag set.
 *  (Despite the sibling functions' naming, no race detection happens here.) */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 1;
}
398
/** Updates an expanded record for a non-atomic write: drops the read set
 *  and records this thread as the last writer with the atomic flag
 *  cleared.  (No race detection happens here.) */
void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	record->numReads = 0;
	record->writeThread = thread;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	record->isAtomic = 0;
}
408
409 /** This function just updates metadata on atomic write. */
410 void recordWrite(thread_id_t thread, void *location) {
411         uint64_t *shadow = lookupAddressEntry(location);
412         uint64_t shadowval = *shadow;
413         ClockVector *currClock = get_execution()->get_cv(thread);
414         /* Do full record */
415         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
416                 fullRecordWrite(thread, location, shadow, currClock);
417                 return;
418         }
419
420         int threadid = id_to_int(thread);
421         modelclock_t ourClock = currClock->getClock(thread);
422
423         /* Thread ID is too large or clock is too large. */
424         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
425                 expandRecord(shadow);
426                 fullRecordWrite(thread, location, shadow, currClock);
427                 return;
428         }
429
430         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
431 }
432
433 /** This function just updates metadata on atomic write. */
434 void recordCalloc(void *location, size_t size) {
435         thread_id_t thread = thread_current_id();
436         for(;size != 0;size--) {
437                 uint64_t *shadow = lookupAddressEntry(location);
438                 uint64_t shadowval = *shadow;
439                 ClockVector *currClock = get_execution()->get_cv(thread);
440                 /* Do full record */
441                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
442                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
443                         return;
444                 }
445
446                 int threadid = id_to_int(thread);
447                 modelclock_t ourClock = currClock->getClock(thread);
448
449                 /* Thread ID is too large or clock is too large. */
450                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
451                         expandRecord(shadow);
452                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
453                         return;
454                 }
455
456                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
457                 location = (void *)(((char *) location) + 1);
458         }
459 }
460
/** This function does race detection on a read for an expanded record.
 *  Checks against the last write, compacts the recorded read vector down
 *  to the reads not subsumed by this one, grows (or first allocates) the
 *  vector when full, and appends this read.  Caller owns any returned
 *  DataRace. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(readThread >= 0);
				record->readClock[copytoindex] = readClock;
				record->thread[copytoindex] = readThread;
			}
			copytoindex++;
		}
	}

	/* Capacities only ever take the values INITCAPACITY * 2^k, so the
	 * vector can be exactly full only when copytoindex is a power of two
	 * (popcount == 1) or zero.
	 * NOTE(review): assumes INITCAPACITY is a power of two — confirm. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0 && record->thread == NULL) {
			/* First reader on a record expanded without read vectors. */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Vector is full: double capacity, keeping surviving reads. */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			real_memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			real_memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	modelclock_t ourClock = currClock->getClock(thread);

	/* Append the current read to the (possibly compacted) vector. */
	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
527
528 /** This function does race detection on a read for an expanded record. */
529 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
530 {
531         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
532         struct DataRace * race = NULL;
533         /* Check for datarace against last write. */
534         if (record->isAtomic)
535                 return NULL;
536
537         modelclock_t writeClock = record->writeClock;
538         thread_id_t writeThread = record->writeThread;
539
540         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
541                 /* We have a datarace */
542                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
543         }
544         return race;
545 }
546
/** This function does race detection on an atomic read.  Only a prior
 *  non-atomic write can race with it; detected races are deduplicated
 *  through raceset before reporting. */
void atomraceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* no clock vector for this thread yet: nothing to compare */

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	/* Last access was atomic: cannot race.  (Plain return is equivalent
	 * to `goto Exit` here since race is still NULL.) */
	if (shadowval & ATOMICMASK)
		return;

	{
		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto Exit;
		}
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Report each distinct backtrace only once. */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
590
/** Race-checks the first byte of a multi-byte read.  On the compact
 *  fast path it writes the pre/post shadow encodings through *old_val /
 *  *new_val so the caller (e.g. raceCheckRead64) can update neighbouring
 *  bytes with identical shadow words without re-running the full check;
 *  the expanded-record and early-exit paths leave the out-parameters at
 *  the caller's sentinel.  Returns the shadow-word pointer for the byte. */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		/* Does the recorded last read subsume this one? */
		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			/* expandRecord kept the prior reader at index 0; add us second. */
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Become the recorded last reader; keep writer fields and atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

		*old_val = shadowval;
		*new_val = *shadow;
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Report each distinct backtrace only once. */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}

	return shadow;
}
661
/** Race-checks one additional byte of a multi-byte read.  Same logic as
 *  raceCheckRead_firstIt but without the out-parameters — used for the
 *  bytes whose shadow words did not match the fast-path encoding. */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		/* Does the recorded last read subsume this one? */
		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			/* expandRecord kept the prior reader at index 0; add us second. */
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Become the recorded last reader; keep writer fields and atomic bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		/* Report each distinct backtrace only once. */
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
726
727 void raceCheckRead64(thread_id_t thread, const void *location)
728 {
729         int old_flag = GET_MODEL_FLAG;
730         ENTER_MODEL_FLAG;
731
732         uint64_t old_shadowval, new_shadowval;
733         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
734 #ifdef COLLECT_STAT
735         load64_count++;
736 #endif
737         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
738         if (CHECKBOUNDARY(location, 7)) {
739                 if (shadow[1]==old_shadowval)
740                         shadow[1] = new_shadowval;
741                 else goto L1;
742                 if (shadow[2]==old_shadowval)
743                         shadow[2] = new_shadowval;
744                 else goto L2;
745                 if (shadow[3]==old_shadowval)
746                         shadow[3] = new_shadowval;
747                 else goto L3;
748                 if (shadow[4]==old_shadowval)
749                         shadow[4] = new_shadowval;
750                 else goto L4;
751                 if (shadow[5]==old_shadowval)
752                         shadow[5] = new_shadowval;
753                 else goto L5;
754                 if (shadow[6]==old_shadowval)
755                         shadow[6] = new_shadowval;
756                 else goto L6;
757                 if (shadow[7]==old_shadowval)
758                         shadow[7] = new_shadowval;
759                 else goto L7;
760                 RESTORE_MODEL_FLAG(old_flag);
761                 return;
762         }
763
764 L1:
765         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
766 L2:
767         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
768 L3:
769         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
770 L4:
771         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
772 L5:
773         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
774 L6:
775         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
776 L7:
777         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
778         RESTORE_MODEL_FLAG(old_flag);
779 }
780
781 void raceCheckRead32(thread_id_t thread, const void *location)
782 {
783         int old_flag = GET_MODEL_FLAG;
784         ENTER_MODEL_FLAG;
785
786         uint64_t old_shadowval, new_shadowval;
787         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
788 #ifdef COLLECT_STAT
789         load32_count++;
790 #endif
791         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
792         if (CHECKBOUNDARY(location, 3)) {
793                 if (shadow[1]==old_shadowval)
794                         shadow[1] = new_shadowval;
795                 else goto L1;
796                 if (shadow[2]==old_shadowval)
797                         shadow[2] = new_shadowval;
798                 else goto L2;
799                 if (shadow[3]==old_shadowval)
800                         shadow[3] = new_shadowval;
801                 else goto L3;
802                 RESTORE_MODEL_FLAG(old_flag);
803                 return;
804         }
805
806 L1:
807         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
808 L2:
809         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
810 L3:
811         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
812         RESTORE_MODEL_FLAG(old_flag);
813 }
814
815 void raceCheckRead16(thread_id_t thread, const void *location)
816 {
817         int old_flag = GET_MODEL_FLAG;
818         ENTER_MODEL_FLAG;
819
820         uint64_t old_shadowval, new_shadowval;
821         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
822 #ifdef COLLECT_STAT
823         load16_count++;
824 #endif
825         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
826         if (CHECKBOUNDARY(location, 1)) {
827                 if (shadow[1]==old_shadowval) {
828                         shadow[1] = new_shadowval;
829                         RESTORE_MODEL_FLAG(old_flag);
830                         return;
831                 }
832         }
833         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
834         RESTORE_MODEL_FLAG(old_flag);
835 }
836
837 void raceCheckRead8(thread_id_t thread, const void *location)
838 {
839         int old_flag = GET_MODEL_FLAG;
840         ENTER_MODEL_FLAG;
841
842 #ifdef COLLECT_STAT
843         load8_count++;
844 #endif
845         raceCheckRead_otherIt(thread, location);
846         RESTORE_MODEL_FLAG(old_flag);
847 }
848
/** Race-check a single-byte write and update its shadow entry.
 *  First-iteration variant used by the raceCheckWriteNN fast paths: besides
 *  doing the check, it reports the shadow-word transition it performed via
 *  old_val/new_val so the caller can replicate the same transition on
 *  adjacent bytes whose shadow words are identical.  Note old_val/new_val
 *  are only written on the short-record path; on the full-record paths they
 *  keep whatever the caller initialized them to.
 *  Returns the shadow-table entry for location. */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;	/* Thread has no clock vector yet; nothing to check. */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		/* Entry already expanded to a full RaceRecord: take the slow path. */
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			/* Values don't fit in the packed short record: expand first. */
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

/* Reached both by the race gotos above and by fallthrough when no race was
 * found: record this write as the latest access in a fresh short record and
 * export the transition for the caller's fast path. */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);

		*old_val = shadowval;
		*new_val = *shadow;
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Attach a backtrace; raceset deduplicates identical races. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}

	return shadow;
}
920
/** Race-check a single-byte write and update its shadow entry.
 *  Slow-path variant used for bytes that could not share the first byte's
 *  shadow-word transition; same logic as raceCheckWrite_firstIt but does
 *  not export the old/new shadow values. */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* Thread has no clock vector yet; nothing to check. */

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		/* Entry already expanded to a full RaceRecord: take the slow path. */
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			/* Values don't fit in the packed short record: expand first. */
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

/* Reached both by the race gotos above and by fallthrough when no race was
 * found: record this write as the latest access in a fresh short record. */
ShadowExit:
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
#ifdef REPORT_DATA_RACES
		/* Attach a backtrace; raceset deduplicates identical races. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
#else
		model_free(race);
#endif
	}
}
987
988 void raceCheckWrite64(thread_id_t thread, const void *location)
989 {
990         int old_flag = GET_MODEL_FLAG;
991         ENTER_MODEL_FLAG;
992         uint64_t old_shadowval, new_shadowval;
993         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
994 #ifdef COLLECT_STAT
995         store64_count++;
996 #endif
997         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
998         if (CHECKBOUNDARY(location, 7)) {
999                 if (shadow[1]==old_shadowval)
1000                         shadow[1] = new_shadowval;
1001                 else goto L1;
1002                 if (shadow[2]==old_shadowval)
1003                         shadow[2] = new_shadowval;
1004                 else goto L2;
1005                 if (shadow[3]==old_shadowval)
1006                         shadow[3] = new_shadowval;
1007                 else goto L3;
1008                 if (shadow[4]==old_shadowval)
1009                         shadow[4] = new_shadowval;
1010                 else goto L4;
1011                 if (shadow[5]==old_shadowval)
1012                         shadow[5] = new_shadowval;
1013                 else goto L5;
1014                 if (shadow[6]==old_shadowval)
1015                         shadow[6] = new_shadowval;
1016                 else goto L6;
1017                 if (shadow[7]==old_shadowval)
1018                         shadow[7] = new_shadowval;
1019                 else goto L7;
1020                 RESTORE_MODEL_FLAG(old_flag);
1021                 return;
1022         }
1023
1024 L1:
1025         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1026 L2:
1027         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1028 L3:
1029         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1030 L4:
1031         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1032 L5:
1033         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1034 L6:
1035         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1036 L7:
1037         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1038         RESTORE_MODEL_FLAG(old_flag);
1039 }
1040
1041 void raceCheckWrite32(thread_id_t thread, const void *location)
1042 {
1043         int old_flag = GET_MODEL_FLAG;
1044         ENTER_MODEL_FLAG;
1045
1046         uint64_t old_shadowval, new_shadowval;
1047         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1048 #ifdef COLLECT_STAT
1049         store32_count++;
1050 #endif
1051         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1052         if (CHECKBOUNDARY(location, 3)) {
1053                 if (shadow[1]==old_shadowval)
1054                         shadow[1] = new_shadowval;
1055                 else goto L1;
1056                 if (shadow[2]==old_shadowval)
1057                         shadow[2] = new_shadowval;
1058                 else goto L2;
1059                 if (shadow[3]==old_shadowval)
1060                         shadow[3] = new_shadowval;
1061                 else goto L3;
1062                 RESTORE_MODEL_FLAG(old_flag);
1063                 return;
1064         }
1065
1066 L1:
1067         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1068 L2:
1069         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1070 L3:
1071         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1072         RESTORE_MODEL_FLAG(old_flag);
1073 }
1074
1075 void raceCheckWrite16(thread_id_t thread, const void *location)
1076 {
1077         int old_flag = GET_MODEL_FLAG;
1078         ENTER_MODEL_FLAG;
1079
1080         uint64_t old_shadowval, new_shadowval;
1081         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1082 #ifdef COLLECT_STAT
1083         store16_count++;
1084 #endif
1085
1086         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1087         if (CHECKBOUNDARY(location, 1)) {
1088                 if (shadow[1]==old_shadowval) {
1089                         shadow[1] = new_shadowval;
1090                         RESTORE_MODEL_FLAG(old_flag);
1091                         return;
1092                 }
1093         }
1094         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1095         RESTORE_MODEL_FLAG(old_flag);
1096 }
1097
1098 void raceCheckWrite8(thread_id_t thread, const void *location)
1099 {
1100         int old_flag = GET_MODEL_FLAG;
1101         ENTER_MODEL_FLAG;
1102
1103 #ifdef COLLECT_STAT
1104         store8_count++;
1105 #endif
1106         raceCheckWrite_otherIt(thread, location);
1107         RESTORE_MODEL_FLAG(old_flag);
1108 }
1109
1110 void raceCheckWriteMemop(thread_id_t thread, const void *location, size_t size)
1111 {
1112         int old_flag = GET_MODEL_FLAG;
1113         ENTER_MODEL_FLAG;
1114
1115         ClockVector *currClock = get_execution()->get_cv(thread);
1116         if (currClock == NULL) {
1117                 RESTORE_MODEL_FLAG(old_flag);
1118                 return;
1119         }
1120
1121         bool alreadyHasRace = false;
1122         for (uint i = 0; i < size; i++) {
1123                 uint64_t *shadow = lookupAddressEntry(location);
1124                 uint64_t shadowval = *shadow;
1125
1126                 struct DataRace * race = NULL;
1127                 /* Do full record */
1128                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1129                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
1130                         goto Exit;
1131                 }
1132
1133                 {
1134                         int threadid = id_to_int(thread);
1135                         modelclock_t ourClock = currClock->getClock(thread);
1136
1137                         /* Thread ID is too large or clock is too large. */
1138                         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1139                                 expandRecord(shadow);
1140                                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1141                                 goto Exit;
1142                         }
1143
1144                         {
1145                                 /* Check for datarace against last read. */
1146                                 modelclock_t readClock = READVECTOR(shadowval);
1147                                 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1148
1149                                 if (clock_may_race(currClock, thread, readClock, readThread)) {
1150                                         /* We have a datarace */
1151                                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1152                                         goto ShadowExit;
1153                                 }
1154                         }
1155
1156                         {
1157                                 /* Check for datarace against last write. */
1158                                 modelclock_t writeClock = WRITEVECTOR(shadowval);
1159                                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1160
1161                                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1162                                         /* We have a datarace */
1163                                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
1164                                         goto ShadowExit;
1165                                 }
1166                         }
1167
1168 ShadowExit:
1169                         *shadow = ENCODEOP(0, 0, threadid, ourClock);
1170                 }
1171
1172 Exit:
1173                 if (race) {
1174 #ifdef REPORT_DATA_RACES
1175                         if (!alreadyHasRace) {
1176                                 alreadyHasRace = true;
1177                                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1178                                 if (raceset->add(race))
1179                                         assert_race(race);
1180                                 else model_free(race);
1181                         } else {
1182                                 model_free(race);
1183                         }
1184 #else
1185                         model_free(race);
1186 #endif
1187                 }
1188         }
1189         RESTORE_MODEL_FLAG(old_flag);
1190 }
1191
1192 void raceCheckReadMemop(thread_id_t thread, const void * location, size_t size)
1193 {
1194         int old_flag = GET_MODEL_FLAG;
1195         ENTER_MODEL_FLAG;
1196
1197         ClockVector *currClock = get_execution()->get_cv(thread);
1198         if (currClock == NULL) {
1199                 RESTORE_MODEL_FLAG(old_flag);
1200                 return;
1201         }
1202
1203         bool alreadyHasRace = false;
1204         for (uint i = 0; i < size; i++) {
1205                 uint64_t *shadow = lookupAddressEntry(location);
1206                 uint64_t shadowval = *shadow;
1207                 struct DataRace * race = NULL;
1208
1209                 /* Do full record */
1210                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1211                         race = fullRaceCheckRead(thread, location, shadow, currClock);
1212                         goto Exit;
1213                 }
1214
1215                 {
1216                         int threadid = id_to_int(thread);
1217                         modelclock_t ourClock = currClock->getClock(thread);
1218
1219                         /* Thread ID is too large or clock is too large. */
1220                         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1221                                 expandRecord(shadow);
1222                                 race = fullRaceCheckRead(thread, location, shadow, currClock);
1223                                 goto Exit;
1224                         }
1225
1226                         /* Check for datarace against last write. */
1227                         modelclock_t writeClock = WRITEVECTOR(shadowval);
1228                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1229
1230                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1231                                 /* We have a datarace */
1232                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
1233                         }
1234
1235                         modelclock_t readClock = READVECTOR(shadowval);
1236                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1237
1238                         if (clock_may_race(currClock, thread, readClock, readThread)) {
1239                                 /* We don't subsume this read... Have to expand record. */
1240                                 expandRecord(shadow);
1241                                 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
1242                                 record->thread[1] = thread;
1243                                 record->readClock[1] = ourClock;
1244                                 record->numReads++;
1245
1246                                 goto Exit;
1247                         }
1248
1249                         *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
1250                 }
1251 Exit:
1252                 if (race) {
1253 #ifdef REPORT_DATA_RACES
1254                         if (!alreadyHasRace) {
1255                                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1256                                 if (raceset->add(race))
1257                                         assert_race(race);
1258                                 else model_free(race);
1259                         } else {
1260                                 model_free(race);
1261                         }
1262 #else
1263                         model_free(race);
1264 #endif
1265                 }
1266         }
1267         RESTORE_MODEL_FLAG(old_flag);
1268 }
1269
1270 #ifdef COLLECT_STAT
1271 void print_normal_accesses()
1272 {
1273         model_print("store 8  count: %u\n", store8_count);
1274         model_print("store 16 count: %u\n", store16_count);
1275         model_print("store 32 count: %u\n", store32_count);
1276         model_print("store 64 count: %u\n", store64_count);
1277
1278         model_print("load  8  count: %u\n", load8_count);
1279         model_print("load  16 count: %u\n", load16_count);
1280         model_print("load  32 count: %u\n", load32_count);
1281         model_print("load  64 count: %u\n", load64_count);
1282 }
1283 #endif