// Different locations commute
if (!same_var(act))
return false;
-
+
// Explore interleavings of seqcst writes to guarantee total order
// of seq_cst operations that don't commute
if (is_write() && is_seqcst() && act->is_write() && act->is_seqcst())
case THREAD_CREATE:
type_str = "thread create";
break;
+ case THREAD_START:
+ type_str = "thread start";
+ break;
case THREAD_YIELD:
type_str = "thread yield";
break;
* ModelAction */
typedef enum action_type {
THREAD_CREATE, /**< A thread creation action */
+ THREAD_START, /**< First action in each thread */
THREAD_YIELD, /**< A thread yield action */
THREAD_JOIN, /**< A thread join action */
ATOMIC_READ, /**< An atomic read action */
/** The thread id that performed this action. */
thread_id_t tid;
-
+
/** The value read or written (if RMW, then the value written). This
* should probably be something longer. */
int value;
/** A back reference to a Node in NodeStack, if this ModelAction is
* saved on the NodeStack. */
Node *node;
-
+
modelclock_t seq_number;
/** The clock vector stored with this action; only needed if this
return false;
}
-/**
+/**
* Gets the clock corresponding to a given thread id from the clock
* vector. */
#include "cyclegraph.h"
#include "action.h"
+/** Initializes a CycleGraph object. */
+
CycleGraph::CycleGraph() {
hasCycles=false;
}
+/** Returns the CycleNode for a given ModelAction. */
+
CycleNode * CycleGraph::getNode(ModelAction * action) {
CycleNode *node=actionToNode.get(action);
if (node==NULL) {
return node;
}
+/** Adds an edge between two ModelActions. */
+
void CycleGraph::addEdge(ModelAction *from, ModelAction *to) {
CycleNode *fromnode=getNode(from);
CycleNode *tonode=getNode(to);
fromnode->addEdge(tonode);
}
+/** Checks whether the first CycleNode can reach the second one. */
+
bool CycleGraph::checkReachable(CycleNode *from, CycleNode *to) {
std::vector<CycleNode *> queue;
HashTable<CycleNode *, CycleNode *, uintptr_t, 4> discovered;
return false;
}
+/** Returns whether a CycleGraph contains cycles. */
+bool CycleGraph::checkForCycles() {
+ return hasCycles;
+}
+
+/** Constructor for a CycleNode. */
+
CycleNode::CycleNode(ModelAction *modelaction) {
action=modelaction;
}
+/** Returns a vector of the edges from a CycleNode. */
+
std::vector<CycleNode *> * CycleNode::getEdges() {
return &edges;
}
+/** Adds an edge to a CycleNode. */
+
void CycleNode::addEdge(CycleNode * node) {
edges.push_back(node);
}
CycleNode * getNode(ModelAction *);
HashTable<ModelAction *, CycleNode *, uintptr_t, 4> actionToNode;
bool checkReachable(CycleNode *from, CycleNode *to);
-
bool hasCycles;
};
struct ShadowTable *root;
+/** This function initializes the data race detector. */
+
void initRaceDetector() {
root=(struct ShadowTable *) calloc(sizeof(struct ShadowTable),1);
}
+/** This function looks up the entry in the shadow table corresponding
+ to a given address. */
+
static uint64_t * lookupAddressEntry(void * address) {
struct ShadowTable *currtable=root;
#ifdef BIT48
- currtable=(struct ShadowTable *) currtable->array[(((uintptr_t)address)>>32)&0xffff];
+ currtable=(struct ShadowTable *) currtable->array[(((uintptr_t)address)>>32)&MASK16BIT];
if (currtable==NULL) {
currtable=(struct ShadowTable *) (root->array[(((uintptr_t)address)>>32)&MASK16BIT]=calloc(sizeof(struct ShadowTable),1));
}
return &basetable->array[((uintptr_t)address)&MASK16BIT];
}
+/**
+ * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
+ * to check the potential for a data race.
+ * @param clock1 The current clock vector
+ * @param tid1 The current thread; paired with clock1
+ * @param clock2 The clock value for the potentially-racing action
+ * @param tid2 The thread ID for the potentially-racing action
+ * @return true if the current clock allows a race with the event at clock2/tid2
+ */
+static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
+ modelclock_t clock2, thread_id_t tid2)
+{
+ return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
+}
+
+/**
+ * Expands a record from the compact form to the full form. This is
+ * necessary for multiple readers or for very large thread ids or time
+ * stamps. */
+
static void expandRecord(uint64_t * shadow) {
uint64_t shadowval=*shadow;
*shadow=(uint64_t) record;
}
+/** This function is called when we detect a data race. */
+
static void reportDataRace() {
printf("The reportDataRace method should report useful things about this datarace!\n");
}
+/** This function does race detection for a write on an expanded
+ * record. */
+
void fullRaceCheckWrite(thread_id_t thread, uint64_t * shadow, ClockVector *currClock) {
struct RaceRecord * record=(struct RaceRecord *) (*shadow);
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
- if (readThread != thread && readClock != 0 && currClock->getClock(readThread) <= readClock) {
+ /* Note that readClock can't actually be zero here, so it could be
+ optimized. */
+
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
reportDataRace();
}
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
- if (writeThread != thread && writeClock != 0 && currClock->getClock(writeThread) <= writeClock) {
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
reportDataRace();
}
record->writeClock=ourClock;
}
+/** This function does race detection on a write.
+ */
+
void raceCheckWrite(thread_id_t thread, void *location, ClockVector *currClock) {
uint64_t * shadow=lookupAddressEntry(location);
uint64_t shadowval=*shadow;
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
- if (readThread != thread && readClock != 0 && currClock->getClock(readThread) <= readClock) {
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
reportDataRace();
}
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
- if (writeThread != thread && writeClock != 0 && currClock->getClock(writeThread) <= writeClock) {
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
reportDataRace();
}
*shadow = ENCODEOP(0, 0, threadid, ourClock);
}
+/** This function does race detection on a read for an expanded
+ * record. */
+
void fullRaceCheckRead(thread_id_t thread, uint64_t * shadow, ClockVector *currClock) {
struct RaceRecord * record=(struct RaceRecord *) (*shadow);
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
- if (writeThread != thread && writeClock != 0 && currClock->getClock(writeThread) <= writeClock) {
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
reportDataRace();
}
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
- if (readThread != thread && currClock->getClock(readThread) <= readClock) {
+ /* Note that this is not really a datarace check as reads cannot
+ actually race. It is just determining that this read subsumes
+ another in the sense that either this read races or neither
+ read races. Note that readClock can't actually be zero, so it
+ could be optimized. */
+
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
/* Still need this read in vector */
if (copytoindex!=i) {
record->readClock[copytoindex]=record->readClock[i];
record->numReads=copytoindex+1;
}
+/** This function does race detection on a read. */
+
void raceCheckRead(thread_id_t thread, void *location, ClockVector *currClock) {
uint64_t * shadow=lookupAddressEntry(location);
uint64_t shadowval=*shadow;
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
- if (writeThread != thread && writeClock != 0 && currClock->getClock(writeThread) <= writeClock) {
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
/* We have a datarace */
reportDataRace();
}
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
- if (readThread != thread && readClock != 0 && currClock->getClock(readThread) <= readClock) {
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We don't subsume this read... Have to expand record. */
expandRecord(shadow);
fullRaceCheckRead(thread, shadow, currClock);
table = (struct hashlistnode<_Key,_Val> **) calloc(initialcapacity, sizeof(struct hashlistnode<_Key,_Val> *));
loadfactor = factor;
capacity = initialcapacity;
- threshold = initialcapacity*loadfactor;
+ threshold = (unsigned int) (initialcapacity*loadfactor);
mask = (capacity << _Shift)-1;
size = 0; // Initial number of elements in the hash
}
memset(table, 0, capacity*sizeof(struct hashlistnode<_Key, _Val> *));
size=0;
}
-
+
void put(_Key key, _Val val) {
if(size > threshold) {
//Resize
unsigned int newsize = capacity << 1;
resize(newsize);
}
-
+
struct hashlistnode<_Key,_Val> *ptr = table[(((_KeyInt)key) & mask)>>_Shift];
size++;
struct hashlistnode<_Key,_Val> *search = ptr;
_Val get(_Key key) {
struct hashlistnode<_Key,_Val> *search = table[(((_KeyInt)key) & mask)>>_Shift];
-
+
while(search!=NULL) {
if (search->key==key) {
return search->val;
bool contains(_Key key) {
struct hashlistnode<_Key,_Val> *search = table[(((_KeyInt)key) & mask)>>_Shift];
-
+
while(search!=NULL) {
if (search->key==key) {
return true;
}
return false;
}
-
+
void resize(unsigned int newsize) {
struct hashlistnode<_Key,_Val> ** oldtable = table;
struct hashlistnode<_Key,_Val> ** newtable;
unsigned int oldcapacity = capacity;
-
+
if((newtable = (struct hashlistnode<_Key,_Val> **) calloc(newsize, sizeof(struct hashlistnode<_Key,_Val> *))) == NULL) {
printf("Calloc error %s %d\n", __FILE__, __LINE__);
exit(-1);
}
-
+
table = newtable; //Update the global hashtable upon resize()
capacity = newsize;
- threshold = newsize * loadfactor;
+ threshold = (unsigned int) (newsize * loadfactor);
mask = (newsize << _Shift)-1;
-
+
for(unsigned int i = 0; i < oldcapacity; i++) {
struct hashlistnode<_Key, _Val> * bin = oldtable[i];
-
+
while(bin!=NULL) {
_Key key=bin->key;
struct hashlistnode<_Key, _Val> * next=bin->next;
-
+
unsigned int index = (((_KeyInt)key) & mask) >>_Shift;
struct hashlistnode<_Key, _Val> * tmp=newtable[index];
bin->next=tmp;
bin = next;
}
}
-
+
free(oldtable); //Free the memory of the old hash table
}
-
+
private:
struct hashlistnode<_Key,_Val> **table;
unsigned int capacity;
#include "librace.h"
#include "common.h"
+#include "datarace.h"
+#include "model.h"
void store_8(void *addr, uint8_t val)
{
DEBUG("addr = %p, val = %" PRIu8 "\n", addr, val);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckWrite(tid, addr, cv);
(*(uint8_t *)addr) = val;
}
void store_16(void *addr, uint16_t val)
{
DEBUG("addr = %p, val = %" PRIu16 "\n", addr, val);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckWrite(tid, addr, cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+1), cv);
(*(uint16_t *)addr) = val;
}
void store_32(void *addr, uint32_t val)
{
DEBUG("addr = %p, val = %" PRIu32 "\n", addr, val);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckWrite(tid, addr, cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+1), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+2), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+3), cv);
(*(uint32_t *)addr) = val;
}
void store_64(void *addr, uint64_t val)
{
DEBUG("addr = %p, val = %" PRIu64 "\n", addr, val);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckWrite(tid, addr, cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+1), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+2), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+3), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+4), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+5), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+6), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr)+7), cv);
(*(uint64_t *)addr) = val;
}
uint8_t load_8(void *addr)
{
DEBUG("addr = %p\n", addr);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckRead(tid, addr, cv);
return *((uint8_t *)addr);
}
uint16_t load_16(void *addr)
{
DEBUG("addr = %p\n", addr);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckRead(tid, addr, cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+1), cv);
return *((uint16_t *)addr);
}
uint32_t load_32(void *addr)
{
DEBUG("addr = %p\n", addr);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckRead(tid, addr, cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+1), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+2), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+3), cv);
return *((uint32_t *)addr);
}
uint64_t load_64(void *addr)
{
DEBUG("addr = %p\n", addr);
+ thread_id_t tid=thread_current()->get_id();
+ ClockVector * cv=model->get_cv(tid);
+ raceCheckRead(tid, addr, cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+1), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+2), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+3), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+4), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+5), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+6), cv);
+ raceCheckRead(tid, (void *)(((uintptr_t)addr)+7), cv);
return *((uint64_t *)addr);
}
#include "common.h"
#include "threads.h"
+#include "datarace.h"
+
/* global "model" object */
#include "model.h"
#include "snapshot-interface.h"
thrd_t user_thread;
ucontext_t main_context;
+ //Initialize race detector
+ initRaceDetector();
+
//Create the singleton SnapshotStack object
snapshotObject = new SnapshotStack();
*/
void ModelChecker::add_action_to_lists(ModelAction *act)
{
+ int tid = id_to_int(act->get_tid());
action_trace->push_back(act);
std::vector<action_list_t> *vec = &(*obj_thrd_map)[act->get_location()];
- if (id_to_int(act->get_tid()) >= (int)vec->size())
+ if (tid >= (int)vec->size())
vec->resize(next_thread_id);
- (*vec)[id_to_int(act->get_tid())].push_back(act);
+ (*vec)[tid].push_back(act);
- (*thrd_last_action)[id_to_int(act->get_tid())] = act;
+ if ((int)thrd_last_action->size() <= tid)
+ thrd_last_action->resize(get_num_threads());
+ (*thrd_last_action)[tid] = act;
}
ModelAction * ModelChecker::get_last_action(thread_id_t tid)
return parent;
}
+ClockVector * ModelChecker::get_cv(thread_id_t tid) {
+ return get_parent_action(tid)->get_cv();
+}
+
/**
* Build up an initial set of all past writes that this 'read' action may read
* from. This set is determined by the clock vector's "happens before"
/** @file model.h
- * @brief Core model checker.
+ * @brief Core model checker.
*/
#ifndef __MODEL_H__
modelclock_t get_next_seq_num();
int switch_to_master(ModelAction *act);
-
+ ClockVector * get_cv(thread_id_t tid);
bool next_execution();
MEMALLOC
static void *(*mallocp)(size_t size);
char *error;
void *ptr;
-
+
/* get address of libc malloc */
if (!mallocp) {
mallocp = ( void * ( * )( size_t ) )dlsym(RTLD_NEXT, "malloc");
exit(EXIT_FAILURE);
}
}
- ptr = mallocp(size);
+ ptr = mallocp(size);
return ptr;
#else
if( !sTheRecord ){
//Skip out at the end of the section
if (buf[0]=='\n')
break;
-
+
sscanf(buf, "%22s %p-%p [%5dK] %c%c%c/%c%c%c SM=%3s %200s\n", &type, &begin, &end, &size, &r, &w, &x, &mr, &mw, &mx, smstr, regionname);
if (w == 'w' && (strstr(regionname, MYBINARYNAME) || strstr(regionname, MYLIBRARYNAME))) {
/** This method returns to the last snapshot before the inputted
* sequence number. This function must be called from the model
- * checking thread and not from a snapshotted stack.
- * @param seqindex is the sequence number to rollback before.
+ * checking thread and not from a snapshotted stack.
+ * @param seqindex is the sequence number to rollback before.
* @return is the sequence number we actually rolled back to.
*/
-
+
int SnapshotStack::backTrackBeforeStep(int seqindex) {
while(true) {
if (stack->index<=seqindex) {
int backTrackBeforeStep(int seq_index);
void snapshotStep(int seq_index);
- private:
+ private:
struct stackEntry * stack;
};
#if !USE_MPROTECT_SNAPSHOT
/** @statics
-* These variables are necessary because the stack is shared region and
+* These variables are necessary because the stack is shared region and
* there exists a race between all processes executing the same function.
* To avoid the problem above, we require variables allocated in 'safe' regions.
* The bug was actually observed with the forkID, these variables below are
}
/** The initSnapShotRecord method initialized the snapshotting data
- * structures for the mprotect based snapshot.
+ * structures for the mprotect based snapshot.
*/
static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions) {
snapshotrecord=( struct SnapShot * )MYMALLOC(sizeof(struct SnapShot));
#endif
}
-/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
+/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
*/
void addMemoryRegionToSnapShot( void * addr, unsigned int numPages) {
#if USE_MPROTECT_SNAPSHOT
getcontext( &sTheRecord->mContextToRollback );
/*
* This is used to quit the process on rollback, so that
- * the process which needs to rollback can quit allowing the process whose snapshotid matches the rollbackid to switch to
+ * the process which needs to rollback can quit allowing the process whose snapshotid matches the rollbackid to switch to
* this context and continue....
*/
if( !sTemp ){
return model->scheduler->get_current_thread();
}
-/* This method just gets around makecontext not being 64-bit clean */
-
+/**
+ * Provides a startup wrapper for each thread, allowing some initial
+ * model-checking data to be recorded. This method also gets around makecontext
+ * not being 64-bit clean.
+ */
void thread_startup() {
Thread * curr_thread = thread_current();
+
+ /* Add dummy "start" action, just to create a first clock vector */
+ model->switch_to_master(new ModelAction(THREAD_START, memory_order_seq_cst, curr_thread));
+
+ /* Call the actual thread function */
curr_thread->start_routine(curr_thread->arg);
}
}
Thread::Thread(thrd_t *t, void (*func)(void *), void *a) :
+ start_routine(func),
+ arg(a),
+ user_thread(t),
+ state(THREAD_CREATED),
last_action_val(VALUE_NONE)
{
int ret;
- user_thread = t;
- start_routine = func;
- arg = a;
-
/* Initialize state */
ret = create_context();
if (ret)
printf("Error in create_context\n");
- state = THREAD_CREATED;
id = model->get_next_id();
*user_thread = id;
parent = thread_current();