fix memory leak
diff --git a/datarace.cc b/datarace.cc
index dd88c2fe98f095d678e55c3dbf1162d98298b9d7..cfbe94bff3553761f81069c40ed63739db578e7a 100644
--- a/datarace.cc
+++ b/datarace.cc
@@ -66,12 +66,12 @@ bool hasNonAtomicStore(const void *address) {
        uint64_t shadowval = *shadow;
        if (ISSHORTRECORD(shadowval)) {
                //Do we have a non atomic write with a non-zero clock
-               return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
+               return !(ATOMICMASK & shadowval);
        } else {
                if (shadowval == 0)
-                       return false;
+                       return true;
                struct RaceRecord *record = (struct RaceRecord *)shadowval;
-               return !record->isAtomic && record->writeClock != 0;
+               return !record->isAtomic;
        }
 }
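For readers following the shadow-memory logic these hunks touch: lookupAddressEntry() maps each tracked byte to a 64-bit shadow word, which is either a bit-packed "short record" (last-read and last-write thread IDs and clocks plus an ATOMICMASK flag, built with ENCODEOP and recognized by ISSHORTRECORD) or, once that no longer fits, a pointer to a heap-allocated struct RaceRecord. A rough sketch of what a short record carries, with illustrative field widths only (the real bit layout and the READVECTOR/WRITEVECTOR/RDTHREADID/WRTHREADID macros live in datarace.h):

	/* Illustrative sketch only: the real encoding packs these fields into one
	 * uint64_t via ENCODEOP(readThread, readClock, writeThread, writeClock);
	 * the widths shown here are assumptions, not the actual layout. */
	struct ShortRecordSketch {
		unsigned readThread;   /* RDTHREADID(shadowval)             */
		unsigned readClock;    /* READVECTOR(shadowval)             */
		unsigned writeThread;  /* WRTHREADID(shadowval)             */
		unsigned writeClock;   /* WRITEVECTOR(shadowval)            */
		bool isAtomic;         /* ATOMICMASK bit: last store atomic */
	};

With that in mind, the change above makes hasNonAtomicStore() treat a never-written (all-zero) shadow word as containing a non-atomic store, and the next hunk makes setAtomicStoreFlag() materialize an empty short record with the atomic bit set (ATOMICMASK | ENCODEOP(0, 0, 0, 0)) instead of returning without recording anything.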
 
@@ -81,8 +81,10 @@ void setAtomicStoreFlag(const void *address) {
        if (ISSHORTRECORD(shadowval)) {
                *shadow = shadowval | ATOMICMASK;
        } else {
-               if (shadowval == 0)
+               if (shadowval == 0) {
+                       *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
                        return;
+               }
                struct RaceRecord *record = (struct RaceRecord *)shadowval;
                record->isAtomic = 1;
        }
@@ -91,7 +93,7 @@ void setAtomicStoreFlag(const void *address) {
 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
        uint64_t * shadow = lookupAddressEntry(address);
        uint64_t shadowval = *shadow;
-       if (ISSHORTRECORD(shadowval)) {
+       if (ISSHORTRECORD(shadowval) || shadowval == 0) {
                //Do we have a non atomic write with a non-zero clock
                *thread = WRTHREADID(shadowval);
                *clock = WRITEVECTOR(shadowval);
@@ -141,6 +143,8 @@ static void expandRecord(uint64_t *shadow)
                ASSERT(readThread >= 0);
                record->thread[0] = readThread;
                record->readClock[0] = readClock;
+       } else {
+               record->thread = NULL;
        }
        if (shadowval & ATOMICMASK)
                record->isAtomic = 1;
@@ -158,7 +162,6 @@ unsigned int race_hash(struct DataRace *race) {
        return hash;
 }
 
-
 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
        if (r1->numframes != r2->numframes)
                return false;
@@ -208,7 +211,7 @@ void assert_race(struct DataRace *race)
 }
 
 /** This function does race detection for a write on an expanded record. */
-struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
 {
        struct RaceRecord *record = (struct RaceRecord *)(*shadow);
        struct DataRace * race = NULL;
@@ -230,7 +233,6 @@ struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_
        }
 
        /* Check for datarace against last write. */
-
        {
                modelclock_t writeClock = record->writeClock;
                thread_id_t writeThread = record->writeThread;
@@ -277,11 +279,122 @@ void raceCheckWrite(thread_id_t thread, void *location)
                        goto Exit;
                }
 
+               {
+                       /* Check for datarace against last read. */
+                       modelclock_t readClock = READVECTOR(shadowval);
+                       thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
 
+                       if (clock_may_race(currClock, thread, readClock, readThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
 
                {
-                       /* Check for datarace against last read. */
+                       /* Check for datarace against last write. */
+                       modelclock_t writeClock = WRITEVECTOR(shadowval);
+                       thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+ShadowExit:
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+       }
+
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
+/** This function does race detection for an atomic write on an expanded record. */
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+       struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+       struct DataRace * race = NULL;
+
+       if (record->isAtomic)
+               goto Exit;
+
+       /* Check for datarace against last read. */
+
+       for (int i = 0;i < record->numReads;i++) {
+               modelclock_t readClock = record->readClock[i];
+               thread_id_t readThread = record->thread[i];
+
+               /* Note that readClock can't actually be zero here, so it could be
+                        optimized. */
+
+               if (clock_may_race(currClock, thread, readClock, readThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                       goto Exit;
+               }
+       }
+
+       /* Check for datarace against last write. */
+
+       {
+               modelclock_t writeClock = record->writeClock;
+               thread_id_t writeThread = record->writeThread;
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                       goto Exit;
+               }
+       }
+Exit:
+       record->numReads = 0;
+       record->writeThread = thread;
+       record->isAtomic = 1;
+       modelclock_t ourClock = currClock->getClock(thread);
+       record->writeClock = ourClock;
+       return race;
+}
+
+/** This function does race detection on an atomic write. */
+void atomraceCheckWrite(thread_id_t thread, void *location)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+                       goto Exit;
+               }
 
+               /* Can't race with atomic */
+               if (shadowval & ATOMICMASK)
+                       goto ShadowExit;
+
+               {
+                       /* Check for datarace against last read. */
                        modelclock_t readClock = READVECTOR(shadowval);
                        thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
 
@@ -294,7 +407,6 @@ void raceCheckWrite(thread_id_t thread, void *location)
 
                {
                        /* Check for datarace against last write. */
-
                        modelclock_t writeClock = WRITEVECTOR(shadowval);
                        thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
 
@@ -306,7 +418,7 @@ void raceCheckWrite(thread_id_t thread, void *location)
                }
 
 ShadowExit:
-               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+               *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
        }
 
 Exit:
@@ -328,6 +440,16 @@ void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, Clock
        record->isAtomic = 1;
 }
 
+/** This function just updates metadata for a non-atomic write on an expanded record. */
+void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+       struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+       record->numReads = 0;
+       record->writeThread = thread;
+       modelclock_t ourClock = currClock->getClock(thread);
+       record->writeClock = ourClock;
+       record->isAtomic = 0;
+}
+
 /** This function just updates metadata on atomic write. */
 void recordWrite(thread_id_t thread, void *location) {
        uint64_t *shadow = lookupAddressEntry(location);
@@ -352,7 +474,33 @@ void recordWrite(thread_id_t thread, void *location) {
        *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
 }
 
+/** This function just updates metadata for the bytes zero-initialized by calloc. */
+void recordCalloc(void *location, size_t size) {
+       thread_id_t thread = thread_current()->get_id();
+       for(;size != 0;size--) {
+               uint64_t *shadow = lookupAddressEntry(location);
+               uint64_t shadowval = *shadow;
+               ClockVector *currClock = get_execution()->get_cv(thread);
+               /* Do full record */
+               if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+                       fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+                       return;
+               }
+
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+                       return;
+               }
 
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+               location = (void *)(((char *) location) + 1);
+       }
+}
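recordCalloc() walks a freshly allocated region one byte at a time and stamps each shadow word as a plain (non-atomic) write by the current thread, so the zero-fill performed by calloc takes part in later race checks like any other store. The call site is not in this file; a hypothetical interposer, with instrumented_calloc and real_calloc as illustrative names only, might look like:

	/* Hypothetical sketch, not part of this commit: how an allocator hook
	 * could feed recordCalloc(). */
	void *instrumented_calloc(size_t count, size_t size) {
		void *ptr = real_calloc(count, size);      /* assumed underlying allocator */
		if (ptr != NULL)
			recordCalloc(ptr, count * size);   /* record the zero-fill as writes */
		return ptr;
	}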
 
 /** This function does race detection on a read for an expanded record. */
 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
@@ -395,7 +543,7 @@ struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, ui
        }
 
        if (__builtin_popcount(copytoindex) <= 1) {
-               if (copytoindex == 0) {
+               if (copytoindex == 0 && record->thread == NULL) {
                        int newCapacity = INITCAPACITY;
                        record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
                        record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
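Paired with the earlier expandRecord() hunk, the extra record->thread == NULL test above appears to be the leak fix named in the commit subject: expandRecord() now leaves record->thread NULL when the short record carried no read, and fullRaceCheckRead() only allocates the thread/readClock arrays when they have never been allocated. Previously, whenever every earlier read was subsumed (copytoindex == 0), fresh arrays were snapshot_malloc()ed over pointers the record might already own, orphaning the old arrays. The allocate-once guard, in isolation:

	/* Sketch of the guard after this change; INITCAPACITY and snapshot_malloc
	 * come from the surrounding code. */
	if (copytoindex == 0 && record->thread == NULL) {
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
	}
	/* otherwise the arrays already attached to the record are reused */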
@@ -483,3 +631,521 @@ Exit:
                else model_free(race);
        }
 }
+
+
+/** This function does race detection on an atomic read for an expanded record. */
+struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+       struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+       struct DataRace * race = NULL;
+       /* Check for datarace against last write. */
+       if (record->isAtomic)
+               return NULL;
+
+       modelclock_t writeClock = record->writeClock;
+       thread_id_t writeThread = record->writeThread;
+
+       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+               /* We have a datarace */
+               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+       }
+       return race;
+}
+
+/** This function does race detection on an atomic read. */
+void atomraceCheckRead(thread_id_t thread, const void *location)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = atomfullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       if (shadowval & ATOMICMASK)
+               return;
+
+       {
+               /* Check for datarace against last write. */
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+                       goto Exit;
+               }
+
+
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
+static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return shadow;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckRead(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               /* Check for datarace against last write. */
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+               }
+
+               modelclock_t readClock = READVECTOR(shadowval);
+               thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, readClock, readThread)) {
+                       /* We don't subsume this read... Have to expand record. */
+                       expandRecord(shadow);
+                       struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+                       record->thread[1] = thread;
+                       record->readClock[1] = ourClock;
+                       record->numReads++;
+
+                       goto Exit;
+               }
+
+               *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+
+               *old_val = shadowval;
+               *new_val = *shadow;
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+
+       return shadow;
+}
+
+static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
+       uint64_t *shadow = lookupAddressEntry(location);
+
+       uint64_t shadowval = *shadow;
+
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckRead(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               /* Check for datarace against last write. */
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+               }
+
+               modelclock_t readClock = READVECTOR(shadowval);
+               thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, readClock, readThread)) {
+                       /* We don't subsume this read... Have to expand record. */
+                       expandRecord(shadow);
+                       struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+                       record->thread[1] = thread;
+                       record->readClock[1] = ourClock;
+                       record->numReads++;
+
+                       goto Exit;
+               }
+
+               *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
+void raceCheckRead64(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 7)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               if (shadow[4]==old_shadowval)
+                       shadow[4] = new_shadowval;
+               else goto L4;
+               if (shadow[5]==old_shadowval)
+                       shadow[5] = new_shadowval;
+               else goto L5;
+               if (shadow[6]==old_shadowval)
+                       shadow[6] = new_shadowval;
+               else goto L6;
+               if (shadow[7]==old_shadowval)
+                       shadow[7] = new_shadowval;
+               else goto L7;
+               return;
+       }
+
+L1:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+L4:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
+L5:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
+L6:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
+L7:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+}
+
+void raceCheckRead32(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 3)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               return;
+       }
+
+L1:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+}
+
+void raceCheckRead16(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+
+       uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 1)) {
+               if (shadow[1]==old_shadowval) {
+                       shadow[1] = new_shadowval;
+                       return;
+               }
+       }
+       raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+}
+
+void raceCheckRead8(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+}
+
+static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return shadow;
+
+       struct DataRace * race = NULL;
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckWrite(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckWrite(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               {
+                       /* Check for datarace against last read. */
+                       modelclock_t readClock = READVECTOR(shadowval);
+                       thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, readClock, readThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+               {
+                       /* Check for datarace against last write. */
+                       modelclock_t writeClock = WRITEVECTOR(shadowval);
+                       thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+ShadowExit:
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+
+               *old_val = shadowval;
+               *new_val = *shadow;
+       }
+
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+
+       return shadow;
+}
+
+static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
+       uint64_t *shadow = lookupAddressEntry(location);
+
+       uint64_t shadowval = *shadow;
+
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = fullRaceCheckWrite(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       {
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       race = fullRaceCheckWrite(thread, location, shadow, currClock);
+                       goto Exit;
+               }
+
+               {
+                       /* Check for datarace against last read. */
+                       modelclock_t readClock = READVECTOR(shadowval);
+                       thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, readClock, readThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+               {
+                       /* Check for datarace against last write. */
+                       modelclock_t writeClock = WRITEVECTOR(shadowval);
+                       thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+                       if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                               /* We have a datarace */
+                               race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+                               goto ShadowExit;
+                       }
+               }
+
+ShadowExit:
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+       }
+
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
+
+void raceCheckWrite64(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 7)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               if (shadow[4]==old_shadowval)
+                       shadow[4] = new_shadowval;
+               else goto L4;
+               if (shadow[5]==old_shadowval)
+                       shadow[5] = new_shadowval;
+               else goto L5;
+               if (shadow[6]==old_shadowval)
+                       shadow[6] = new_shadowval;
+               else goto L6;
+               if (shadow[7]==old_shadowval)
+                       shadow[7] = new_shadowval;
+               else goto L7;
+               return;
+       }
+
+L1:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+L4:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
+L5:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
+L6:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
+L7:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+}
+
+void raceCheckWrite32(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 3)) {
+               if (shadow[1]==old_shadowval)
+                       shadow[1] = new_shadowval;
+               else goto L1;
+               if (shadow[2]==old_shadowval)
+                       shadow[2] = new_shadowval;
+               else goto L2;
+               if (shadow[3]==old_shadowval)
+                       shadow[3] = new_shadowval;
+               else goto L3;
+               return;
+       }
+
+L1:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+}
+
+void raceCheckWrite16(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+       if (CHECKBOUNDARY(location, 1)) {
+               if (shadow[1]==old_shadowval) {
+                       shadow[1] = new_shadowval;
+                       return;
+               }
+       }
+       raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+}
+
+void raceCheckWrite8(thread_id_t thread, const void *location)
+{
+       uint64_t old_shadowval, new_shadowval;
+       old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+
+       raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+}
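The raceCheckRead{64,32,16,8} and raceCheckWrite{64,32,16,8} entry points above all follow one pattern: the first byte goes through the full *_firstIt check, which also reports the shadow word it found and the one it wrote back whenever it managed a simple short-record update (otherwise both stay INVALIDSHADOWVAL and never match). If the whole access fits in one CHECKBOUNDARY region and each remaining byte's shadow word still equals that old value, those bytes are updated in place without re-running the race check; the first byte that differs falls through the goto-label chain into the per-byte *_otherIt slow path. A loop-based equivalent of the unrolled read variant, for illustration only (raceCheckReadN_sketch is not part of this commit):

	/* Illustration only: intended to be behaviorally equivalent to the
	 * unrolled raceCheckRead64/32/16 fast paths above. */
	static void raceCheckReadN_sketch(thread_id_t thread, const void *location, int nbytes)
	{
		uint64_t old_val = INVALIDSHADOWVAL, new_val = INVALIDSHADOWVAL;
		uint64_t *shadow = raceCheckRead_firstIt(thread, location, &old_val, &new_val);
		int i = 1;
		if (CHECKBOUNDARY(location, nbytes - 1)) {
			for (; i < nbytes; i++) {
				if (shadow[i] == old_val)
					shadow[i] = new_val;	/* same state as byte 0: update in place */
				else
					break;			/* diverged: handle the rest byte by byte */
			}
			if (i == nbytes)
				return;
		}
		for (; i < nbytes; i++)
			raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + i));
	}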