fix memory leak
[c11tester.git] / datarace.cc
index d15ca50cf9d97de15a06c80f881735f1d297566f..cfbe94bff3553761f81069c40ed63739db578e7a 100644
@@ -60,6 +60,50 @@ static uint64_t * lookupAddressEntry(const void *address)
        return &basetable->array[((uintptr_t)address) & MASK16BIT];
 }
 
+
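+/** Returns true unless the last store recorded for this address in the shadow table was an atomic store. */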
+bool hasNonAtomicStore(const void *address) {
+       uint64_t * shadow = lookupAddressEntry(address);
+       uint64_t shadowval = *shadow;
+       if (ISSHORTRECORD(shadowval)) {
+               // Report a non-atomic store when the ATOMICMASK bit is clear
+               return !(ATOMICMASK & shadowval);
+       } else {
+               if (shadowval == 0)
+                       return true;
+               struct RaceRecord *record = (struct RaceRecord *)shadowval;
+               return !record->isAtomic;
+       }
+}
+
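+/** Marks the last store to this address as atomic, either by setting ATOMICMASK on the short record or isAtomic on the expanded record. */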
+void setAtomicStoreFlag(const void *address) {
+       uint64_t * shadow = lookupAddressEntry(address);
+       uint64_t shadowval = *shadow;
+       if (ISSHORTRECORD(shadowval)) {
+               *shadow = shadowval | ATOMICMASK;
+       } else {
+               if (shadowval == 0) {
+                       *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
+                       return;
+               }
+               struct RaceRecord *record = (struct RaceRecord *)shadowval;
+               record->isAtomic = 1;
+       }
+}
+
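+/** Retrieves the thread id and clock of the last store to this address from the shadow record. */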
+void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
+       uint64_t * shadow = lookupAddressEntry(address);
+       uint64_t shadowval = *shadow;
+       if (ISSHORTRECORD(shadowval) || shadowval == 0) {
+               // Decode the last writer's thread id and clock from the short record
+               *thread = WRTHREADID(shadowval);
+               *clock = WRITEVECTOR(shadowval);
+       } else {
+               struct RaceRecord *record = (struct RaceRecord *)shadowval;
+               *thread = record->writeThread;
+               *clock = record->writeClock;
+       }
+}
+
 /**
  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
  * to check the potential for a data race.
@@ -93,14 +137,17 @@ static void expandRecord(uint64_t *shadow)
        record->writeClock = writeClock;
 
        if (readClock != 0) {
-               record->capacity = INITCAPACITY;
-               record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * record->capacity);
-               record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * record->capacity);
+               record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
+               record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
                record->numReads = 1;
                ASSERT(readThread >= 0);
                record->thread[0] = readThread;
                record->readClock[0] = readClock;
+       } else {
+               record->thread = NULL;
        }
+       if (shadowval & ATOMICMASK)
+               record->isAtomic = 1;
        *shadow = (uint64_t) record;
 }
 
@@ -115,7 +162,6 @@ unsigned int race_hash(struct DataRace *race) {
        return hash;
 }
 
-
 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
        if (r1->numframes != r2->numframes)
                return false;
@@ -149,11 +195,11 @@ static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldc
  */
 void assert_race(struct DataRace *race)
 {
-       model_print("At location: \n");
+       model_print("Race detected at location: \n");
        backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
-       model_print("Data race detected @ address %p:\n"
+       model_print("\nData race detected @ address %p:\n"
                                                        "    Access 1: %5s in thread %2d @ clock %3u\n"
-                                                       "    Access 2: %5s in thread %2d @ clock %3u",
+                                                       "    Access 2: %5s in thread %2d @ clock %3u\n\n",
                                                        race->address,
                                                        race->isoldwrite ? "write" : "read",
                                                        id_to_int(race->oldthread),
@@ -165,7 +211,7 @@ void assert_race(struct DataRace *race)
 }
 
 /** This function does race detection for a write on an expanded record. */
-struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
 {
        struct RaceRecord *record = (struct RaceRecord *)(*shadow);
        struct DataRace * race = NULL;
@@ -187,7 +233,6 @@ struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_
        }
 
        /* Check for datarace against last write. */
-
        {
                modelclock_t writeClock = record->writeClock;
                thread_id_t writeThread = record->writeThread;
@@ -201,6 +246,7 @@ struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_
 Exit:
        record->numReads = 0;
        record->writeThread = thread;
+       record->isAtomic = 0;
        modelclock_t ourClock = currClock->getClock(thread);
        record->writeClock = ourClock;
        return race;
@@ -212,6 +258,9 @@ void raceCheckWrite(thread_id_t thread, void *location)
        uint64_t *shadow = lookupAddressEntry(location);
        uint64_t shadowval = *shadow;
        ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
        struct DataRace * race = NULL;
        /* Do full record */
        if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
@@ -230,11 +279,122 @@ void raceCheckWrite(thread_id_t thread, void *location)
                        goto Exit;
                }
 
+               {
+                       /* Check for datarace against last read. */
+                       modelclock_t readClock = READVECTOR(shadowval);
+                       thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
 
+                       if (clock_may_race(currClock, thread, readClock, readThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
 
                {
-                       /* Check for datarace against last read. */
+                       /* Check for datarace against last write. */
+                       modelclock_t writeClock = WRITEVECTOR(shadowval);
+                       thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+ShadowExit:
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+       }
+
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
+/** This function does race detection for an atomic write on an expanded record. */
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+       struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+       struct DataRace * race = NULL;
+
+       if (record->isAtomic)
+               goto Exit;
 
+       /* Check for datarace against last read. */
+
+       for (int i = 0;i < record->numReads;i++) {
+               modelclock_t readClock = record->readClock[i];
+               thread_id_t readThread = record->thread[i];
+
+               /* Note that readClock can't actually be zero here, so this
+                        check could be optimized. */
+
+               if (clock_may_race(currClock, thread, readClock, readThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                       goto Exit;
+               }
+       }
+
+       /* Check for datarace against last write. */
+
+       {
+               modelclock_t writeClock = record->writeClock;
+               thread_id_t writeThread = record->writeThread;
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                       goto Exit;
+               }
+       }
+Exit:
+       record->numReads = 0;
+       record->writeThread = thread;
+       record->isAtomic = 1;
+       modelclock_t ourClock = currClock->getClock(thread);
+       record->writeClock = ourClock;
+       return race;
+}
+
+/** This function does race detection on an atomic write. */
+void atomraceCheckWrite(thread_id_t thread, void *location)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               /* Can't race with atomic */
+               if (shadowval & ATOMICMASK)
+                       goto ShadowExit;
+
+               {
+                       /* Check for datarace against last read. */
                        modelclock_t readClock = READVECTOR(shadowval);
                        thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
 
@@ -247,7 +407,6 @@ void raceCheckWrite(thread_id_t thread, void *location)
 
                {
                        /* Check for datarace against last write. */
-
                        modelclock_t writeClock = WRITEVECTOR(shadowval);
                        thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
 
@@ -259,7 +418,7 @@ void raceCheckWrite(thread_id_t thread, void *location)
                }
 
 ShadowExit:
-               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+               *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
        }
 
 Exit:
@@ -270,6 +429,79 @@ Exit:
                else model_free(race);
        }
 }
+
+/** This function just records an atomic write on an expanded record; no race detection is performed. */
+void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+       struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+       record->numReads = 0;
+       record->writeThread = thread;
+       modelclock_t ourClock = currClock->getClock(thread);
+       record->writeClock = ourClock;
+       record->isAtomic = 1;
+}
+
+/** This function just records a non-atomic write on an expanded record; no race detection is performed. */
+void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+       struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+       record->numReads = 0;
+       record->writeThread = thread;
+       modelclock_t ourClock = currClock->getClock(thread);
+       record->writeClock = ourClock;
+       record->isAtomic = 0;
+}
+
+/** This function just updates the shadow metadata for an atomic write; no race detection is performed. */
+void recordWrite(thread_id_t thread, void *location) {
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               fullRecordWrite(thread, location, shadow, currClock);
+               return;
+       }
+
+       int threadid = id_to_int(thread);
+       modelclock_t ourClock = currClock->getClock(thread);
+
+       /* Thread ID is too large or clock is too large. */
+       if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+               expandRecord(shadow);
+               fullRecordWrite(thread, location, shadow, currClock);
+               return;
+       }
+
+       *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
+}
+
+/** This function updates the shadow metadata for a calloc'd region, recording the zero initialization as a non-atomic write to each byte. */
+void recordCalloc(void *location, size_t size) {
+       thread_id_t thread = thread_current()->get_id();
+       for(;size != 0;size--) {
+               uint64_t *shadow = lookupAddressEntry(location);
+               uint64_t shadowval = *shadow;
+               ClockVector *currClock = get_execution()->get_cv(thread);
+               /* Do full record */
+               if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+                       fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+                       return;
+               }
+
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+                       return;
+               }
+
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+               location = (void *)(((char *) location) + 1);
+       }
+}
+
 /** This function does race detection on a read for an expanded record. */
 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
 {
@@ -310,23 +542,21 @@ struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, ui
                }
        }
 
-       if (copytoindex >= record->capacity) {
-               if (record->capacity == 0) {
+       if (__builtin_popcount(copytoindex) <= 1) {
+               if (copytoindex == 0 && record->thread == NULL) {
                        int newCapacity = INITCAPACITY;
                        record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
                        record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
-                       record->capacity = newCapacity;
-               } else {
-                       int newCapacity = record->capacity * 2;
+               } else if (copytoindex>=INITCAPACITY) {
+                       int newCapacity = copytoindex * 2;
                        thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
                        modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
-                       std::memcpy(newthread, record->thread, record->capacity * sizeof(thread_id_t));
-                       std::memcpy(newreadClock, record->readClock, record->capacity * sizeof(modelclock_t));
+                       std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
+                       std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
                        snapshot_free(record->readClock);
                        snapshot_free(record->thread);
                        record->readClock = newreadClock;
                        record->thread = newthread;
-                       record->capacity = newCapacity;
                }
        }
 
@@ -345,6 +575,9 @@ void raceCheckRead(thread_id_t thread, const void *location)
        uint64_t *shadow = lookupAddressEntry(location);
        uint64_t shadowval = *shadow;
        ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
        struct DataRace * race = NULL;
 
        /* Do full record */
@@ -388,8 +621,429 @@ ShadowExit:
                        }
                }
 
-               *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock);
+               *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
+
+/** This function does race detection for an atomic read on an expanded record. */
+struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+       struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+       struct DataRace * race = NULL;
+       /* Check for datarace against last write. */
+       if (record->isAtomic)
+               return NULL;
+
+       modelclock_t writeClock = record->writeClock;
+       thread_id_t writeThread = record->writeThread;
+
+       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+               /* We have a datarace */
+               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+       }
+       return race;
+}
+
+/** This function does race detection on an atomic read. */
+void atomraceCheckRead(thread_id_t thread, const void *location)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = atomfullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       if (shadowval & ATOMICMASK)
+               return;
+
+       {
+               /* Check for datarace against last write. */
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+                       goto Exit;
+               }
+
+
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
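+/** Race-checks a read of the first byte of a multi-byte access and, when the entry
+ *  stays in short-record form, reports the old/new shadow values so the caller can
+ *  fast-path the remaining bytes. */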
+static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return shadow;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckRead(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               /* Check for datarace against last write. */
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+               }
+
+               modelclock_t readClock = READVECTOR(shadowval);
+               thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, readClock, readThread)) {
+                       /* We don't subsume this read... Have to expand record. */
+                       expandRecord(shadow);
+                       struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+                       record->thread[1] = thread;
+                       record->readClock[1] = ourClock;
+                       record->numReads++;
+
+                       goto Exit;
+               }
+
+               *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+
+               *old_val = shadowval;
+               *new_val = *shadow;
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+
+       return shadow;
+}
+
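+/** Race-checks a read of one additional byte of a multi-byte access (no shadow values are reported back). */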
+static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
+       uint64_t *shadow = lookupAddressEntry(location);
+
+       uint64_t shadowval = *shadow;
+
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckRead(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               /* Check for datarace against last write. */
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+               }
+
+               modelclock_t readClock = READVECTOR(shadowval);
+               thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, readClock, readThread)) {
+                       /* We don't subsume this read... Have to expand record. */
+                       expandRecord(shadow);
+                       struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+                       record->thread[1] = thread;
+                       record->readClock[1] = ourClock;
+                       record->numReads++;
+
+                       goto Exit;
+               }
+
+               *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
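+/** Race-checks an 8-byte read. The first byte is checked normally; if the access
+ *  does not cross a shadow-table boundary and the remaining bytes share the same
+ *  old shadow value, they are updated directly, otherwise each is checked in turn. */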
+void raceCheckRead64(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 7)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               if (shadow[4]==old_shadowval)
+                       shadow[4] = new_shadowval;
+               else goto L4;
+               if (shadow[5]==old_shadowval)
+                       shadow[5] = new_shadowval;
+               else goto L5;
+               if (shadow[6]==old_shadowval)
+                       shadow[6] = new_shadowval;
+               else goto L6;
+               if (shadow[7]==old_shadowval)
+                       shadow[7] = new_shadowval;
+               else goto L7;
+               return;
+       }
+
+L1:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+L4:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
+L5:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
+L6:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
+L7:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+}
+
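+/** Race-checks a 4-byte read; same fast-path strategy as raceCheckRead64. */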
+void raceCheckRead32(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 3)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               return;
+       }
+
+L1:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+}
+
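+/** Race-checks a 2-byte read; same fast-path strategy as raceCheckRead64. */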
+void raceCheckRead16(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+
+       uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 1)) {
+               if (shadow[1]==old_shadowval) {
+                       shadow[1] = new_shadowval;
+                       return;
+               }
+       }
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+}
+
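+/** Race-checks a 1-byte read. */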
+void raceCheckRead8(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+}
+
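+/** Race-checks a write to the first byte of a multi-byte access and, when the entry
+ *  stays in short-record form, reports the old/new shadow values so the caller can
+ *  fast-path the remaining bytes. */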
+static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return shadow;
+
+       struct DataRace * race = NULL;
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckWrite(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckWrite(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               {
+                       /* Check for datarace against last read. */
+                       modelclock_t readClock = READVECTOR(shadowval);
+                       thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, readClock, readThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+               {
+                       /* Check for datarace against last write. */
+                       modelclock_t writeClock = WRITEVECTOR(shadowval);
+                       thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+ShadowExit:
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+
+               *old_val = shadowval;
+               *new_val = *shadow;
+       }
+
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+
+       return shadow;
+}
+
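+/** Race-checks a write to one additional byte of a multi-byte access. */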
+static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
+       uint64_t *shadow = lookupAddressEntry(location);
+
+       uint64_t shadowval = *shadow;
+
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckWrite(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckWrite(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               {
+                       /* Check for datarace against last read. */
+                       modelclock_t readClock = READVECTOR(shadowval);
+                       thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, readClock, readThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+               {
+                       /* Check for datarace against last write. */
+                       modelclock_t writeClock = WRITEVECTOR(shadowval);
+                       thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+ShadowExit:
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
        }
+
 Exit:
        if (race) {
                race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
@@ -398,3 +1052,100 @@ Exit:
                else model_free(race);
        }
 }
+
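+/** Race-checks an 8-byte write. The first byte is checked normally; if the access
+ *  does not cross a shadow-table boundary and the remaining bytes share the same
+ *  old shadow value, they are updated directly, otherwise each is checked in turn. */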
+void raceCheckWrite64(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 7)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               if (shadow[4]==old_shadowval)
+                       shadow[4] = new_shadowval;
+               else goto L4;
+               if (shadow[5]==old_shadowval)
+                       shadow[5] = new_shadowval;
+               else goto L5;
+               if (shadow[6]==old_shadowval)
+                       shadow[6] = new_shadowval;
+               else goto L6;
+               if (shadow[7]==old_shadowval)
+                       shadow[7] = new_shadowval;
+               else goto L7;
+               return;
+       }
+
+L1:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+L4:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
+L5:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
+L6:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
+L7:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+}
+
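+/** Race-checks a 4-byte write; same fast-path strategy as raceCheckWrite64. */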
+void raceCheckWrite32(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 3)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               return;
+       }
+
+L1:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+}
+
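+/** Race-checks a 2-byte write; same fast-path strategy as raceCheckWrite64. */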
+void raceCheckWrite16(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 1)) {
+               if (shadow[1]==old_shadowval) {
+                       shadow[1] = new_shadowval;
+                       return;
+               }
+       }
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+}
+
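+/** Race-checks a 1-byte write. */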
+void raceCheckWrite8(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+}