Add datarace support for atomics and calloc
author: bdemsky <bdemsky@uci.edu>
Wed, 20 Nov 2019 19:50:00 +0000 (11:50 -0800)
committer: bdemsky <bdemsky@uci.edu>
Wed, 20 Nov 2019 19:50:00 +0000 (11:50 -0800)
cmodelint.cc
datarace.cc
datarace.h
mymemory.cc

index 8774c5b3f6a0124cbc3079d7de5bfcee79b5505d..1a03153fbeb908fe782a3cce80d7fcb7074ecab5 100644 (file)
@@ -113,7 +113,7 @@ VOLATILELOAD(64)
                *((volatile uint ## size ## _t *)obj) = val;            \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                         \
                *((volatile uint ## size ## _t *)obj) = val;            \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                         \
-                       recordWrite(tid, (void *)(((char *)obj)+i));          \
+                       atomraceCheckWrite(tid, (void *)(((char *)obj)+i));          \
                }                                                       \
        }
 
                }                                                       \
        }
 
@@ -130,7 +130,7 @@ VOLATILESTORE(64)
                *((volatile uint ## size ## _t *)obj) = val;                                 \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                       \
                *((volatile uint ## size ## _t *)obj) = val;                                 \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                       \
-                       recordWrite(tid, (void *)(((char *)obj)+i));          \
+                       atomraceCheckWrite(tid, (void *)(((char *)obj)+i));          \
                }                                                       \
        }
 
                }                                                       \
        }
 
@@ -143,8 +143,13 @@ CDSATOMICINT(64)
 #define CDSATOMICLOAD(size)                                             \
        uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
                ensureModel();                                                      \
 #define CDSATOMICLOAD(size)                                             \
        uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
                ensureModel();                                                      \
-               return (uint ## size ## _t)model->switch_to_master( \
+               uint ## size ## _t val = (uint ## size ## _t)model->switch_to_master( \
                        new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
                        new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
+               thread_id_t tid = thread_current()->get_id();           \
+               for(int i=0;i < size / 8;i++) {                         \
+                       atomraceCheckRead(tid, (void *)(((char *)obj)+i));    \
+               }                                                       \
+               return val; \
        }
 
 CDSATOMICLOAD(8)
        }
 
 CDSATOMICLOAD(8)
@@ -160,7 +165,7 @@ CDSATOMICLOAD(64)
                *((volatile uint ## size ## _t *)obj) = val;                     \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                       \
                *((volatile uint ## size ## _t *)obj) = val;                     \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                       \
-                       recordWrite(tid, (void *)(((char *)obj)+i));          \
+                       atomraceCheckWrite(tid, (void *)(((char *)obj)+i));          \
                }                                                       \
        }
 
                }                                                       \
        }
 
@@ -180,9 +185,10 @@ CDSATOMICSTORE(64)
                *((volatile uint ## size ## _t *)addr) = _copy;                  \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                       \
                *((volatile uint ## size ## _t *)addr) = _copy;                  \
                thread_id_t tid = thread_current()->get_id();           \
                for(int i=0;i < size / 8;i++) {                       \
+                       atomraceCheckRead(tid,  (void *)(((char *)addr)+i));  \
                        recordWrite(tid, (void *)(((char *)addr)+i));         \
                }                                                       \
                        recordWrite(tid, (void *)(((char *)addr)+i));         \
                }                                                       \
-               return _old;                                                          \
+               return _old;                                            \
        })
 
 // cds atomic exchange
        })
 
 // cds atomic exchange
@@ -336,45 +342,45 @@ void cds_atomic_thread_fence(int atomic_index, const char * position) {
 void cds_func_entry(const char * funcName) {
        ensureModel();
        /*
 void cds_func_entry(const char * funcName) {
        ensureModel();
        /*
-       Thread * th = thread_current();
-       uint32_t func_id;
-
-       ModelHistory *history = model->get_history();
-       if ( !history->getFuncMap()->contains(funcName) ) {
-               // add func id to func map
-               func_id = history->get_func_counter();
-               history->incr_func_counter();
-               history->getFuncMap()->put(funcName, func_id);
-
-               // add func id to reverse func map
-               ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
-               if ( func_map_rev->size() <= func_id )
-                       func_map_rev->resize( func_id + 1 );
-               func_map_rev->at(func_id) = funcName;
-       } else {
-               func_id = history->getFuncMap()->get(funcName);
-       }
-
-       history->enter_function(func_id, th->get_id());
-*/
+          Thread * th = thread_current();
+          uint32_t func_id;
+
+          ModelHistory *history = model->get_history();
+          if ( !history->getFuncMap()->contains(funcName) ) {
+               // add func id to func map
+               func_id = history->get_func_counter();
+               history->incr_func_counter();
+               history->getFuncMap()->put(funcName, func_id);
+
+               // add func id to reverse func map
+               ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
+               if ( func_map_rev->size() <= func_id )
+                       func_map_rev->resize( func_id + 1 );
+               func_map_rev->at(func_id) = funcName;
+          } else {
+               func_id = history->getFuncMap()->get(funcName);
+          }
+
+          history->enter_function(func_id, th->get_id());
+        */
 }
 
 void cds_func_exit(const char * funcName) {
        ensureModel();
 
 /*     Thread * th = thread_current();
 }
 
 void cds_func_exit(const char * funcName) {
        ensureModel();
 
 /*     Thread * th = thread_current();
-       uint32_t func_id;
+        uint32_t func_id;
 
 
-       ModelHistory *history = model->get_history();
-       func_id = history->getFuncMap()->get(funcName);
+        ModelHistory *history = model->get_history();
+        func_id = history->getFuncMap()->get(funcName);
 
 
       * func_id not found; this could happen in the case where a function calls cds_func_entry
       * when the model has been defined yet, but then an atomic inside the function initializes
       * the model. And then cds_func_exit is called upon the function exiting.
       *
-       if (func_id == 0)
-               return;
+ * func_id not found; this could happen in the case where a function calls cds_func_entry
+ * when the model has not been defined yet, but then an atomic inside the function initializes
+ * the model. And then cds_func_exit is called upon the function exiting.
+ *
+        if (func_id == 0)
+                return;
 
 
-       history->exit_function(func_id, th->get_id());
-*/
+        history->exit_function(func_id, th->get_id());
+ */
 }
 }
index dd88c2fe98f095d678e55c3dbf1162d98298b9d7..be0fc3b6a3fee6803f95d5f6127706cdabb24c27 100644 (file)
@@ -318,6 +318,123 @@ Exit:
        }
 }
 
        }
 }
 
+
+/** This function does race detection for an atomic write on an expanded record.
+ *  Returns the detected race (or NULL); the caller is responsible for reporting it. */
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+{
+	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+	struct DataRace * race = NULL;
+
+	/* Prior access was atomic, so an atomic write cannot race with it;
+	   skip straight to updating the record. */
+	if (record->isAtomic)
+		goto Exit;
+
+	/* Check for datarace against every recorded read. */
+
+	for (int i = 0;i < record->numReads;i++) {
+		modelclock_t readClock = record->readClock[i];
+		thread_id_t readThread = record->thread[i];
+
+		/* Note that readClock can't actually be zero here, so it could be
+			 optimized. */
+
+		if (clock_may_race(currClock, thread, readClock, readThread)) {
+			/* We have a datarace */
+			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+			goto Exit;
+		}
+	}
+
+	/* Check for datarace against last write. */
+
+	{
+		modelclock_t writeClock = record->writeClock;
+		thread_id_t writeThread = record->writeThread;
+
+		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+			/* We have a datarace */
+			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+			goto Exit;
+		}
+	}
+Exit:
+	/* Record this atomic write as the last access; read history is cleared. */
+	record->numReads = 0;
+	record->writeThread = thread;
+	record->isAtomic = 1;
+	modelclock_t ourClock = currClock->getClock(thread);
+	record->writeClock = ourClock;
+	return race;
+}
+
+/** This function does race detection on an atomic write: checks the write
+ *  against the shadow record for this byte, reports any race, and marks the
+ *  byte as last accessed by an atomic write from this thread. */
+void atomraceCheckWrite(thread_id_t thread, void *location)
+{
+	uint64_t *shadow = lookupAddressEntry(location);
+	uint64_t shadowval = *shadow;
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	/* No clock vector yet: too early in the execution to race-check. */
+	if (currClock == NULL)
+		return;
+
+	struct DataRace * race = NULL;
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	{
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large for the compact
+		   encoding: expand to a full record first. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+			goto Exit;
+		}
+
+		/* Can't race with atomic */
+		if (shadowval & ATOMICMASK)
+			goto ShadowExit;
+
+		{
+			/* Check for datarace against last read. */
+
+			modelclock_t readClock = READVECTOR(shadowval);
+			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, readClock, readThread)) {
+				/* We have a datarace */
+				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+		{
+			/* Check for datarace against last write. */
+
+			modelclock_t writeClock = WRITEVECTOR(shadowval);
+			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+				/* We have a datarace */
+				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+ShadowExit:
+		/* Re-encode the shadow word: this atomic write is now the last access. */
+		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
+	}
+
+Exit:
+	/* Report the race once, deduplicated via the global race set. */
+	if (race) {
+		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+		if (raceset->add(race))
+			assert_race(race);
+		else model_free(race);
+	}
+}
+
 /** This function does race detection for a write on an expanded record. */
 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
        struct RaceRecord *record = (struct RaceRecord *)(*shadow);
 /** This function does race detection for a write on an expanded record. */
 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
        struct RaceRecord *record = (struct RaceRecord *)(*shadow);
@@ -328,6 +445,16 @@ void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, Clock
        record->isAtomic = 1;
 }
 
        record->isAtomic = 1;
 }
 
+/** This function updates an expanded record for a non-atomic write.
+ *  No race detection is performed — used to mark freshly calloc'd memory. */
+void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+	record->numReads = 0;
+	record->writeThread = thread;
+	modelclock_t ourClock = currClock->getClock(thread);
+	record->writeClock = ourClock;
+	record->isAtomic = 0;
+}
+
 /** This function just updates metadata on atomic write. */
 void recordWrite(thread_id_t thread, void *location) {
        uint64_t *shadow = lookupAddressEntry(location);
 /** This function just updates metadata on atomic write. */
 void recordWrite(thread_id_t thread, void *location) {
        uint64_t *shadow = lookupAddressEntry(location);
@@ -352,6 +479,34 @@ void recordWrite(thread_id_t thread, void *location) {
        *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
 }
 
        *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
 }
 
+/** This function just updates metadata on atomic write. */
+void recordCalloc(void *location, size_t size) {
+       thread_id_t thread = thread_current()->get_id();
+       for(;size != 0;size--) {
+               uint64_t *shadow = lookupAddressEntry(location);
+               uint64_t shadowval = *shadow;
+               ClockVector *currClock = get_execution()->get_cv(thread);
+               /* Do full record */
+               if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+                       fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+                       return;
+               }
+
+               int threadid = id_to_int(thread);
+               modelclock_t ourClock = currClock->getClock(thread);
+
+               /* Thread ID is too large or clock is too large. */
+               if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+                       expandRecord(shadow);
+                       fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+                       return;
+               }
+
+               *shadow = ENCODEOP(0, 0, threadid, ourClock);
+               location = (void *)(((char *) location) + 1);
+       }
+}
+
 
 
 /** This function does race detection on a read for an expanded record. */
 
 
 /** This function does race detection on a read for an expanded record. */
@@ -483,3 +638,66 @@ Exit:
                else model_free(race);
        }
 }
                else model_free(race);
        }
 }
+
+
+/** This function does race detection for an atomic read on an expanded record.
+ *  Returns the detected race (or NULL); the caller is responsible for reporting it. */
+struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+	struct DataRace * race = NULL;
+	/* Prior access was an atomic write: an atomic read cannot race with it. */
+	if (record->isAtomic)
+		return NULL;
+
+	/* Check for datarace against last write. */
+	modelclock_t writeClock = record->writeClock;
+	thread_id_t writeThread = record->writeThread;
+
+	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+		/* We have a datarace */
+		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+	}
+	return race;
+}
+
+/** This function does race detection on a read. */
+void atomraceCheckRead(thread_id_t thread, const void *location)
+{
+       uint64_t *shadow = lookupAddressEntry(location);
+       uint64_t shadowval = *shadow;
+       ClockVector *currClock = get_execution()->get_cv(thread);
+       if (currClock == NULL)
+               return;
+
+       struct DataRace * race = NULL;
+
+       /* Do full record */
+       if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+               race = atomfullRaceCheckRead(thread, location, shadow, currClock);
+               goto Exit;
+       }
+
+       if (shadowval && ATOMICMASK)
+               return;
+
+       {
+               /* Check for datarace against last write. */
+
+               modelclock_t writeClock = WRITEVECTOR(shadowval);
+               thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+               if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+                       /* We have a datarace */
+                       race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+                       goto Exit;
+               }
+
+
+       }
+Exit:
+       if (race) {
+               race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+               if (raceset->add(race))
+                       assert_race(race);
+               else model_free(race);
+       }
+}
index 2de2a3390759f6dafbc24aba83e4b42e0024117e..f026556add051292ea4c57daaac4ad42255f1576 100644 (file)
@@ -44,8 +44,11 @@ struct DataRace {
 
 void initRaceDetector();
 void raceCheckWrite(thread_id_t thread, void *location);
 
 void initRaceDetector();
 void raceCheckWrite(thread_id_t thread, void *location);
+void atomraceCheckWrite(thread_id_t thread, void *location);
 void raceCheckRead(thread_id_t thread, const void *location);
 void raceCheckRead(thread_id_t thread, const void *location);
+void atomraceCheckRead(thread_id_t thread, const void *location);
 void recordWrite(thread_id_t thread, void *location);
 void recordWrite(thread_id_t thread, void *location);
+void recordCalloc(void *location, size_t size);
 void assert_race(struct DataRace *race);
 bool hasNonAtomicStore(const void *location);
 void setAtomicStoreFlag(const void *location);
 void assert_race(struct DataRace *race);
 bool hasNonAtomicStore(const void *location);
 void setAtomicStoreFlag(const void *location);
index 1f8b616020e481e795fd36f74bfa1d2ab56722cc..5185cd0f7993e1655aef2619ea275b06fcd401b4 100644 (file)
@@ -11,6 +11,7 @@
 #include "common.h"
 #include "threads-model.h"
 #include "model.h"
 #include "common.h"
 #include "threads-model.h"
 #include "model.h"
+#include "datarace.h"
 
 #define REQUESTS_BEFORE_ALLOC 1024
 
 
 #define REQUESTS_BEFORE_ALLOC 1024
 
@@ -234,10 +235,12 @@ void * calloc(size_t num, size_t size)
        if (user_snapshot_space) {
                void *tmp = mspace_calloc(user_snapshot_space, num, size);
                ASSERT(tmp);
        if (user_snapshot_space) {
                void *tmp = mspace_calloc(user_snapshot_space, num, size);
                ASSERT(tmp);
+               recordCalloc(tmp, num*size);
                return tmp;
        } else {
                void *tmp = HandleEarlyAllocationRequest(size * num);
                memset(tmp, 0, size * num);
                return tmp;
        } else {
                void *tmp = HandleEarlyAllocationRequest(size * num);
                memset(tmp, 0, size * num);
+               recordCalloc(tmp, num*size);
                return tmp;
        }
 }
                return tmp;
        }
 }