snapshot.o malloc.o mymemory.o common.o mutex.o conditionvariable.o \
context.o execution.o libannotate.o plugins.o pthread.o futex.o fuzzer.o \
sleeps.o history.o funcnode.o funcinst.o predicate.o printf.o newfuzzer.o \
- concretepredicate.o waitobj.o hashfunction.o pipe.o
+ concretepredicate.o waitobj.o hashfunction.o pipe.o actionlist.o
CPPFLAGS += -Iinclude -I.
LDFLAGS := -ldl -lrt -rdynamic -lpthread
last_fence_release(NULL),
cv(NULL),
rf_cv(NULL),
- trace_ref(NULL),
- thrdmap_ref(NULL),
action_ref(NULL),
value(value),
type(type),
last_fence_release(NULL),
cv(NULL),
rf_cv(NULL),
- trace_ref(NULL),
- thrdmap_ref(NULL),
action_ref(NULL),
value(value),
type(type),
last_fence_release(NULL),
cv(NULL),
rf_cv(NULL),
- trace_ref(NULL),
- thrdmap_ref(NULL),
action_ref(NULL),
value(value),
type(type),
last_fence_release(NULL),
cv(NULL),
rf_cv(NULL),
- trace_ref(NULL),
- thrdmap_ref(NULL),
action_ref(NULL),
value(value),
type(type),
last_fence_release(NULL),
cv(NULL),
rf_cv(NULL),
- trace_ref(NULL),
- thrdmap_ref(NULL),
action_ref(NULL),
value(value),
type(type),
* vectors which have already been rolled back to an unallocated state.
*/
- /*
- if (cv)
- delete cv; */
+
+ if (cv)
+ delete cv;
+ if (rf_cv)
+ delete rf_cv;
}
int ModelAction::getSize() const {
 * itself does not indicate no value.
*/
#define VALUE_NONE 0xdeadbeef
+#define WRITE_REFERENCED ((void *)0x1)
/**
* @brief The "location" at which a fence occurs
/* to accommodate pthread create and join */
Thread * thread_operand;
void set_thread_operand(Thread *th) { thread_operand = th; }
- void setTraceRef(sllnode<ModelAction *> *ref) { trace_ref = ref; }
- void setThrdMapRef(sllnode<ModelAction *> *ref) { thrdmap_ref = ref; }
+
void setActionRef(sllnode<ModelAction *> *ref) { action_ref = ref; }
- sllnode<ModelAction *> * getTraceRef() { return trace_ref; }
- sllnode<ModelAction *> * getThrdMapRef() { return thrdmap_ref; }
sllnode<ModelAction *> * getActionRef() { return action_ref; }
+
SNAPSHOTALLOC
private:
const char * get_type_str() const;
*/
ClockVector *cv;
ClockVector *rf_cv;
- sllnode<ModelAction *> * trace_ref;
- sllnode<ModelAction *> * thrdmap_ref;
sllnode<ModelAction *> * action_ref;
-
/** @brief The value written (for write or RMW; undefined for read) */
uint64_t value;
--- /dev/null
+#include "actionlist.h"
+#include "action.h"
+#include <string.h>
+#include "stl-model.h"
+#include <limits.h>
+
+/** @brief Construct an empty actionlist: zero-initialized radix-tree
+ *  root, no linked-list nodes, size 0. */
+actionlist::actionlist() :
+	root(),
+	head(NULL),
+	tail(NULL),
+	_size(0)
+{
+}
+
+/** @brief Destroy the actionlist, releasing all tree and list nodes via clear(). */
+actionlist::~actionlist() {
+	clear();
+}
+
+/** @brief Construct an interior radix-tree node with no parent, a zero
+ *  child count, and all child slots cleared. */
+allnode::allnode() :
+	parent(NULL),
+	count(0) {
+	bzero(children, sizeof(children));
+}
+
+/** @brief Recursively delete interior child nodes.
+ *  Children whose pointer carries the ISACT tag are really sllnode
+ *  leaves owned by the linked list, so they are skipped here (the list
+ *  nodes are freed separately, e.g. by actionlist::clear). */
+allnode::~allnode() {
+	if (count != 0)
+		for(int i=0;i<ALLNODESIZE;i++) {
+			if (children[i] != NULL && !(((uintptr_t) children[i]) & ISACT))
+				delete children[i];
+		}
+}
+
+/**
+ * @brief Locate the list node of the action whose clock is the largest
+ * value strictly less than @a index, by walking the radix tree instead
+ * of scanning the linked list.
+ *
+ * Phase 1 walks up/sideways from this node until a non-NULL sibling
+ * slot with a smaller index is found; phase 2 then descends, taking the
+ * largest populated slot at each level.
+ *
+ * @param index Sequence number to search below (decremented on entry)
+ * @return The preceding sllnode, or NULL if no earlier action exists
+ */
+sllnode<ModelAction *> * allnode::findPrev(modelclock_t index) {
+	allnode * ptr = this;
+	modelclock_t increment = 1;
+	int totalshift = 0;
+	index -= increment;
+
+	while(1) {
+		modelclock_t currindex = (index >> totalshift) & ALLMASK;
+
+		//See if we went negative...
+		if (currindex != ALLMASK) {
+			if (ptr->children[currindex] == NULL) {
+				//need to keep searching at this level
+				index -= increment;
+				continue;
+			} else {
+				//found non-null...
+				if (totalshift == 0)
+					return reinterpret_cast<sllnode<ModelAction *> *>(((uintptr_t)ptr->children[currindex])& ACTMASK);
+				//need to increment here...
+				ptr = ptr->children[currindex];
+				increment = increment >> ALLBITS;
+				totalshift -= ALLBITS;
+				break;
+			}
+		}
+		//If we get here, we already did the decrement earlier...no need to decrement again
+		ptr = ptr->parent;
+		increment = increment << ALLBITS;
+		totalshift += ALLBITS;
+
+		if (ptr == NULL) {
+			return NULL;
+		}
+	}
+
+	// Phase 2: descend toward the leaf, preferring the largest index
+	// that is still <= the (already decremented) target.
+	while(1) {
+		while(1) {
+			modelclock_t currindex = (index >> totalshift) & ALLMASK;
+			if (ptr->children[currindex] != NULL) {
+				if (totalshift != 0) {
+					ptr = ptr->children[currindex];
+					break;
+				} else {
+					allnode * act = ptr->children[currindex];
+					sllnode<ModelAction *> * node = reinterpret_cast<sllnode<ModelAction *>*>(((uintptr_t)act) & ACTMASK);
+					return node;
+				}
+			}
+			index -= increment;
+		}
+
+		increment = increment >> ALLBITS;
+		totalshift -= ALLBITS;
+	}
+}
+
+/**
+ * @brief Insert @a act into both the radix tree (keyed by its sequence
+ * number) and the doubly linked list, keeping the list ordered by clock.
+ *
+ * Actions sharing a clock value share one leaf slot: the slot always
+ * holds the most recently inserted node for that clock, tagged ISACT,
+ * and the earlier ones remain reachable through the linked list.
+ */
+void actionlist::addAction(ModelAction * act) {
+	_size++;
+	int shiftbits = MODELCLOCKBITS - ALLBITS;
+	modelclock_t clock = act->get_seq_number();
+
+	allnode * ptr = &root;
+	do {
+		modelclock_t currindex = (clock >> shiftbits) & ALLMASK;
+		allnode * tmp = ptr->children[currindex];
+		if (shiftbits == 0) {
+			sllnode<ModelAction *> * llnode = new sllnode<ModelAction *>();
+			llnode->val = act;
+			if (tmp == NULL) {
+				sllnode<ModelAction *> * llnodeprev = ptr->findPrev(clock);
+				if (llnodeprev != NULL) {
+					llnode->next = llnodeprev->next;
+					llnode->prev = llnodeprev;
+
+					//see if we are the new tail
+					if (llnode->next != NULL)
+						llnode->next->prev = llnode;
+					else
+						tail = llnode;
+					llnodeprev->next = llnode;
+				} else {
+					//We are the beginning
+					llnode->next = head;
+					llnode->prev = NULL;
+					if (head != NULL) {
+						head->prev = llnode;
+					} else {
+						//we are the first node
+						tail = llnode;
+					}
+
+					head = llnode;
+				}
+				ptr->children[currindex] = reinterpret_cast<allnode *>(((uintptr_t) llnode) | ISACT);
+
+				//need to find next link
+				ptr->count++;
+			} else {
+				//handle case where something else is here
+
+				sllnode<ModelAction *> * llnodeprev = reinterpret_cast<sllnode<ModelAction *>*>(((uintptr_t) tmp) & ACTMASK);
+				llnode->next = llnodeprev->next;
+				llnode->prev = llnodeprev;
+				if (llnode->next != NULL)
+					llnode->next->prev = llnode;
+				else
+					tail = llnode;
+				llnodeprev->next = llnode;
+				ptr->children[currindex] = reinterpret_cast<allnode *>(((uintptr_t) llnode) | ISACT);
+			}
+			return;
+		} else if (tmp == NULL) {
+			// Interior slot missing: materialize the path lazily.
+			tmp = new allnode();
+			ptr->children[currindex] = tmp;
+			tmp->parent=ptr;
+			ptr->count++;
+		}
+		shiftbits -= ALLBITS;
+		ptr = tmp;
+	} while(1);
+
+}
+
+/**
+ * @brief Decrement @a ptr's child count; if it drops to zero, unlink
+ * the node from its parent's child array, propagate the decrement up
+ * the tree, and delete the node. The root (parent == NULL) is never
+ * unlinked or deleted.
+ */
+void decrementCount(allnode * ptr) {
+	ptr->count--;
+	if (ptr->count == 0) {
+		if (ptr->parent != NULL) {
+			for(uint i=0;i<ALLNODESIZE;i++) {
+				if (ptr->parent->children[i]==ptr) {
+					ptr->parent->children[i] = NULL;
+					decrementCount(ptr->parent);
+					break;
+				}
+			}
+			delete ptr;
+		}
+	}
+}
+
+/**
+ * @brief Remove @a act from the tree and the linked list.
+ *
+ * Walks the tree down to the leaf slot for act's clock, then scans
+ * backward through list nodes that share that clock to find the exact
+ * action. Silently returns if the action is not present.
+ */
+void actionlist::removeAction(ModelAction * act) {
+	int shiftbits = MODELCLOCKBITS;
+	modelclock_t clock = act->get_seq_number();
+	allnode * ptr = &root;
+	allnode * oldptr;
+	modelclock_t currindex;
+
+	while(shiftbits != 0) {
+		shiftbits -= ALLBITS;
+		currindex = (clock >> shiftbits) & ALLMASK;
+		oldptr = ptr;
+		ptr = ptr->children[currindex];
+		if (ptr == NULL)
+			return;
+	}
+
+	// ptr now holds the ISACT-tagged leaf: the newest list node for this clock.
+	sllnode<ModelAction *> * llnode = reinterpret_cast<sllnode<ModelAction *> *>(((uintptr_t) ptr) & ACTMASK);
+	bool first = true;
+	do {
+		if (llnode->val == act) {
+			//found node to remove
+			sllnode<ModelAction *> * llnodeprev = llnode->prev;
+			sllnode<ModelAction *> * llnodenext = llnode->next;
+			if (llnodeprev != NULL) {
+				llnodeprev->next = llnodenext;
+			} else {
+				head = llnodenext;
+			}
+			if (llnodenext != NULL) {
+				llnodenext->prev = llnodeprev;
+			} else {
+				tail = llnodeprev;
+			}
+			if (first) {
+				//see if previous node has same clock as us...
+				if (llnodeprev != NULL && llnodeprev->val->get_seq_number() == clock) {
+					oldptr->children[currindex] = reinterpret_cast<allnode *>(((uintptr_t)llnodeprev) | ISACT);
+				} else {
+					//remove ourselves and go up tree
+					oldptr->children[currindex] = NULL;
+					decrementCount(oldptr);
+				}
+			}
+			delete llnode;
+			_size--;
+			return;
+		}
+		llnode = llnode->prev;
+		first = false;
+	} while(llnode != NULL && llnode->val->get_seq_number() == clock);
+	//node not found in list... no deletion
+	return;
+}
+
+/** @brief Remove all actions: delete the radix subtrees (leaf sllnodes
+ *  are skipped by ~allnode), free every linked-list node, and reset
+ *  head/tail, the root child count, and the size. */
+void actionlist::clear() {
+	for(uint i = 0;i < ALLNODESIZE;i++) {
+		if (root.children[i] != NULL) {
+			delete root.children[i];
+			root.children[i] = NULL;
+		}
+	}
+
+	while(head != NULL) {
+		sllnode<ModelAction *> *tmp=head->next;
+		delete head;
+		head = tmp;
+	}
+	tail = NULL;
+
+	root.count = 0;
+	_size = 0;
+}
+
+/** @brief @return true when the list holds no actions (root has no children) */
+bool actionlist::isEmpty() {
+	return root.count == 0;
+}
+
+/**
+ * Fix the children's parent pointers after the root's address changes
+ * (possible when a vector<action_list_t> containing this actionlist is
+ * resized and its elements are relocated)
+ */
+void actionlist::fixupParent()
+{
+	for (int i = 0; i < ALLNODESIZE; i++) {
+		allnode * child = root.children[i];
+		if (child != NULL && child->parent != &root)
+			child->parent = &root;
+	}
+}
--- /dev/null
+#ifndef ACTIONLIST_H
+#define ACTIONLIST_H
+
+#include "classlist.h"
+#include "stl-model.h"
+
+/* The low bit of a child pointer tags it as an sllnode leaf rather than
+ * an interior allnode; ACTMASK strips the tag back off. */
+#define ISACT ((uintptr_t) 1ULL)
+#define ACTMASK (~ISACT)
+
+/* Radix-tree fan-out: 4 bits (16 children) per level over a 32-bit clock. */
+#define ALLBITS 4
+#define ALLNODESIZE (1 << ALLBITS)
+#define ALLMASK ((1 << ALLBITS)-1)
+#define MODELCLOCKBITS 32
+
+class allnode;
+void decrementCount(allnode *);
+
+/** @brief Interior node of the radix tree indexed by action sequence number. */
+class allnode {
+public:
+	allnode();
+	~allnode();
+	SNAPSHOTALLOC;
+
+private:
+	allnode * parent;	// NULL only for the tree root
+	allnode * children[ALLNODESIZE];	// subtrees, or ISACT-tagged sllnode leaves
+	int count;	// number of non-NULL children
+	sllnode<ModelAction *> * findPrev(modelclock_t index);
+	friend class actionlist;
+	friend void decrementCount(allnode *);
+};
+
+/** @brief Clock-ordered collection of ModelActions: a doubly linked
+ *  list for iteration plus a radix tree (keyed by sequence number) for
+ *  fast positional insertion and removal. */
+class actionlist {
+public:
+	actionlist();
+	~actionlist();
+	void addAction(ModelAction * act);
+	void removeAction(ModelAction * act);
+	void clear();
+	bool isEmpty();
+	uint size() {return _size;}
+	// NOTE: end() returns the last element (tail), not a one-past-the-end
+	// sentinel as in STL containers.
+	sllnode<ModelAction *> * begin() {return head;}
+	sllnode<ModelAction *> * end() {return tail;}
+	void fixupParent();
+
+	SNAPSHOTALLOC;
+
+private:
+	allnode root;
+	sllnode<ModelAction *> * head;
+	sllnode<ModelAction* > * tail;
+
+	uint _size;
+};
+#endif
class Predicate;
class ConcretePredicate;
class WaitObj;
+class actionlist;
+
+#include "actionlist.h"
struct model_snapshot_members;
struct bug_message;
-typedef SnapList<ModelAction *> action_list_t;
+typedef SnapList<ModelAction *> simple_action_list_t;
+typedef actionlist action_list_t;
typedef SnapList<uint32_t> func_id_list_t;
typedef SnapList<FuncInst *> func_inst_list_t;
-typedef HashTable<FuncInst *, ModelAction *, uintptr_t, 0> inst_act_map_t;
typedef HashSet<Predicate *, uintptr_t, 0, model_malloc, model_calloc, model_free> PredSet;
typedef HSIterator<Predicate *, uintptr_t, 0, model_malloc, model_calloc, model_free> PredSetIter;
num_threads = cv->num_threads;
}
- /* Element-wise maximum */
+ /* Element-wise minimum */
for (int i = 0;i < cv->num_threads;i++)
if (cv->clock[i] < clock[i]) {
clock[i] = cv->clock[i];
#define FORK_HANDLER_HACK
/** Enable smart fuzzer */
-#define NEWFUZZER
+//#define NEWFUZZER
/** Define semantics of volatile memory operations. */
#define memory_order_volatile_load memory_order_acquire
//#define memory_order_volatile_load memory_order_relaxed
//#define memory_order_volatile_store memory_order_relaxed
+//#define COLLECT_STAT
+#define REPORT_DATA_RACES
+
#endif
static void *memory_top;
static RaceSet * raceset;
+#ifdef COLLECT_STAT
+static unsigned int store8_count = 0;
+static unsigned int store16_count = 0;
+static unsigned int store32_count = 0;
+static unsigned int store64_count = 0;
+
+static unsigned int load8_count = 0;
+static unsigned int load16_count = 0;
+static unsigned int load32_count = 0;
+static unsigned int load64_count = 0;
+#endif
+
static const ModelExecution * get_execution()
{
return model->get_execution();
ASSERT(readThread >= 0);
record->thread[0] = readThread;
record->readClock[0] = readClock;
+ } else {
+ record->thread = NULL;
}
if (shadowval & ATOMICMASK)
record->isAtomic = 1;
return hash;
}
-
bool race_equals(struct DataRace *r1, struct DataRace *r2) {
if (r1->numframes != r2->numframes)
return false;
/** This function is called when we detect a data race.*/
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
+#ifdef REPORT_DATA_RACES
struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
race->oldthread = oldthread;
race->oldclock = oldclock;
race->isnewwrite = isnewwrite;
race->address = address;
return race;
+#else
+ return NULL;
+#endif
}
/**
}
/** This function does race detection for a write on an expanded record. */
-struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
struct DataRace * race = NULL;
}
/* Check for datarace against last write. */
-
{
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
goto Exit;
}
-
-
{
/* Check for datarace against last read. */
-
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
-
/** This function does race detection for a write on an expanded record. */
-struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
struct DataRace * race = NULL;
{
/* Check for datarace against last read. */
-
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
}
}
-
-
/** This function does race detection on a read for an expanded record. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
}
if (__builtin_popcount(copytoindex) <= 1) {
- if (copytoindex == 0) {
+ if (copytoindex == 0 && record->thread == NULL) {
int newCapacity = INITCAPACITY;
record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
{
/* Check for datarace against last write. */
+ modelclock_t writeClock = WRITEVECTOR(shadowval);
+ thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+ goto Exit;
+ }
+
+
+ }
+Exit:
+ if (race) {
+#ifdef REPORT_DATA_RACES
+ race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+ if (raceset->add(race))
+ assert_race(race);
+ else model_free(race);
+#else
+ model_free(race);
+#endif
+ }
+}
+
+/**
+ * @brief Race-check a read of the first byte of a multi-byte access.
+ *
+ * On the short-record fast path, the shadow word before and after the
+ * update are reported through @a old_val / @a new_val so the caller can
+ * bulk-update neighboring bytes whose shadow words match. On the other
+ * paths (full record, expansion) the out-params are left untouched —
+ * callers pre-initialize them to INVALIDSHADOWVAL.
+ *
+ * @return Pointer to the shadow entry for @a location
+ */
+static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
+{
+	uint64_t *shadow = lookupAddressEntry(location);
+	uint64_t shadowval = *shadow;
+
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	if (currClock == NULL)
+		return shadow;
+
+	struct DataRace * race = NULL;
+
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = fullRaceCheckRead(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	{
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			race = fullRaceCheckRead(thread, location, shadow, currClock);
+			goto Exit;
+		}
+
+		/* Check for datarace against last write. */
+		modelclock_t writeClock = WRITEVECTOR(shadowval);
+		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+			/* We have a datarace */
+			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+		}
+
+		modelclock_t readClock = READVECTOR(shadowval);
+		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+		if (clock_may_race(currClock, thread, readClock, readThread)) {
+			/* We don't subsume this read... Have to expand record. */
+			expandRecord(shadow);
+			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+			record->thread[1] = thread;
+			record->readClock[1] = ourClock;
+			record->numReads++;
+
+			goto Exit;
+		}
+
+		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+
+		*old_val = shadowval;
+		*new_val = *shadow;
+	}
+Exit:
+	if (race) {
+#ifdef REPORT_DATA_RACES
+		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+		if (raceset->add(race))
+			assert_race(race);
+		else model_free(race);
+#else
+		model_free(race);
+#endif
+	}
+
+	return shadow;
+}
+
+/**
+ * @brief Race-check a read of one non-first byte of a multi-byte access.
+ * Same logic as raceCheckRead_firstIt, but without the old/new shadow
+ * out-params used for the caller's bulk fast path.
+ */
+static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
+	uint64_t *shadow = lookupAddressEntry(location);
+
+	uint64_t shadowval = *shadow;
+
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	if (currClock == NULL)
+		return;
+
+	struct DataRace * race = NULL;
+
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = fullRaceCheckRead(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	{
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			race = fullRaceCheckRead(thread, location, shadow, currClock);
+			goto Exit;
+		}
+		/* Check for datarace against last write. */
+		modelclock_t writeClock = WRITEVECTOR(shadowval);
+		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+			/* We have a datarace */
+			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+		}
+
+		modelclock_t readClock = READVECTOR(shadowval);
+		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+		if (clock_may_race(currClock, thread, readClock, readThread)) {
+			/* We don't subsume this read... Have to expand record. */
+			expandRecord(shadow);
+			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+			record->thread[1] = thread;
+			record->readClock[1] = ourClock;
+			record->numReads++;
+
+			goto Exit;
+		}
+
+		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
+	}
+Exit:
+	if (race) {
+#ifdef REPORT_DATA_RACES
+		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+		if (raceset->add(race))
+			assert_race(race);
+		else model_free(race);
+#else
+		model_free(race);
+#endif
+	}
+}
+
+/**
+ * @brief Race-check an 8-byte read starting at @a location.
+ *
+ * The first byte is checked fully; if the access stays within one
+ * 64K shadow page (CHECKBOUNDARY) and a neighbor's shadow word equals
+ * the first byte's old value, it is updated in place. On the first
+ * mismatch we jump to the matching label — the labels deliberately fall
+ * through so every remaining byte still gets an individual check.
+ */
+void raceCheckRead64(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	load64_count++;
+#endif
+	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+	if (CHECKBOUNDARY(location, 7)) {
+		if (shadow[1]==old_shadowval)
+			shadow[1] = new_shadowval;
+		else goto L1;
+		if (shadow[2]==old_shadowval)
+			shadow[2] = new_shadowval;
+		else goto L2;
+		if (shadow[3]==old_shadowval)
+			shadow[3] = new_shadowval;
+		else goto L3;
+		if (shadow[4]==old_shadowval)
+			shadow[4] = new_shadowval;
+		else goto L4;
+		if (shadow[5]==old_shadowval)
+			shadow[5] = new_shadowval;
+		else goto L5;
+		if (shadow[6]==old_shadowval)
+			shadow[6] = new_shadowval;
+		else goto L6;
+		if (shadow[7]==old_shadowval)
+			shadow[7] = new_shadowval;
+		else goto L7;
+		return;
+	}
+
+L1:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+L4:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
+L5:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
+L6:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
+L7:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+}
+
+/**
+ * @brief Race-check a 4-byte read starting at @a location.
+ * Same fast-path / label-fallthrough structure as raceCheckRead64.
+ */
+void raceCheckRead32(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	load32_count++;
+#endif
+	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+	if (CHECKBOUNDARY(location, 3)) {
+		if (shadow[1]==old_shadowval)
+			shadow[1] = new_shadowval;
+		else goto L1;
+		if (shadow[2]==old_shadowval)
+			shadow[2] = new_shadowval;
+		else goto L2;
+		if (shadow[3]==old_shadowval)
+			shadow[3] = new_shadowval;
+		else goto L3;
+		return;
+	}
+
+L1:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+}
+
+/**
+ * @brief Race-check a 2-byte read: full check on the first byte, shadow
+ * fast-path or individual check on the second.
+ */
+void raceCheckRead16(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	load16_count++;
+#endif
+	uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+	if (CHECKBOUNDARY(location, 1)) {
+		if (shadow[1]==old_shadowval) {
+			shadow[1] = new_shadowval;
+			return;
+		}
+	}
+	raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+}
+
+/** @brief Race-check a 1-byte read; the shadow out-params are unused here. */
+void raceCheckRead8(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	load8_count++;
+#endif
+	raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+}
+
+/**
+ * @brief Race-check a write to the first byte of a multi-byte access.
+ *
+ * Mirrors raceCheckRead_firstIt: on the short-record path the pre- and
+ * post-update shadow words are returned via @a old_val / @a new_val for
+ * the caller's neighbor fast path; other paths leave them untouched
+ * (callers pre-initialize to INVALIDSHADOWVAL). Note that the shadow
+ * word is rewritten (ShadowExit) even when a race was detected.
+ *
+ * @return Pointer to the shadow entry for @a location
+ */
+static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
+{
+	uint64_t *shadow = lookupAddressEntry(location);
+	uint64_t shadowval = *shadow;
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	if (currClock == NULL)
+		return shadow;
+
+	struct DataRace * race = NULL;
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = fullRaceCheckWrite(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	{
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			race = fullRaceCheckWrite(thread, location, shadow, currClock);
+			goto Exit;
+		}
+
+		{
+			/* Check for datarace against last read. */
+			modelclock_t readClock = READVECTOR(shadowval);
+			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, readClock, readThread)) {
+				/* We have a datarace */
+				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+		{
+			/* Check for datarace against last write. */
+			modelclock_t writeClock = WRITEVECTOR(shadowval);
+			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+				/* We have a datarace */
+				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+ShadowExit:
+		*shadow = ENCODEOP(0, 0, threadid, ourClock);
+
+		*old_val = shadowval;
+		*new_val = *shadow;
+	}
+
+Exit:
+	if (race) {
+#ifdef REPORT_DATA_RACES
+		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+		if (raceset->add(race))
+			assert_race(race);
+		else model_free(race);
+#else
+		model_free(race);
+#endif
+	}
+
+	return shadow;
+}
+
+/**
+ * @brief Race-check a write to one non-first byte of a multi-byte
+ * access; same logic as raceCheckWrite_firstIt without the shadow
+ * out-params.
+ */
+static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
+	uint64_t *shadow = lookupAddressEntry(location);
+
+	uint64_t shadowval = *shadow;
+
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	if (currClock == NULL)
+		return;
+
+	struct DataRace * race = NULL;
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = fullRaceCheckWrite(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	{
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}
+		{
+			/* Check for datarace against last read. */
+			modelclock_t readClock = READVECTOR(shadowval);
+			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, readClock, readThread)) {
+				/* We have a datarace */
+				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+		{
+			/* Check for datarace against last write. */
+			modelclock_t writeClock = WRITEVECTOR(shadowval);
+			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+				/* We have a datarace */
+				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+ShadowExit:
+		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}
+
Exit:
	if (race) {
+#ifdef REPORT_DATA_RACES
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
+#else
+	model_free(race);
+#endif
+	}
+}
+
+/**
+ * @brief Race-check an 8-byte write starting at @a location.
+ * Same shadow fast path and deliberate label fallthrough as
+ * raceCheckRead64, but for writes.
+ */
+void raceCheckWrite64(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	store64_count++;
+#endif
+	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+	if (CHECKBOUNDARY(location, 7)) {
+		if (shadow[1]==old_shadowval)
+			shadow[1] = new_shadowval;
+		else goto L1;
+		if (shadow[2]==old_shadowval)
+			shadow[2] = new_shadowval;
+		else goto L2;
+		if (shadow[3]==old_shadowval)
+			shadow[3] = new_shadowval;
+		else goto L3;
+		if (shadow[4]==old_shadowval)
+			shadow[4] = new_shadowval;
+		else goto L4;
+		if (shadow[5]==old_shadowval)
+			shadow[5] = new_shadowval;
+		else goto L5;
+		if (shadow[6]==old_shadowval)
+			shadow[6] = new_shadowval;
+		else goto L6;
+		if (shadow[7]==old_shadowval)
+			shadow[7] = new_shadowval;
+		else goto L7;
+		return;
	}
+
+L1:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+L4:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
+L5:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
+L6:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
+L7:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
}
+
+/**
+ * @brief Race-check a 4-byte write starting at @a location.
+ * Same fast-path / label-fallthrough structure as raceCheckWrite64.
+ */
+void raceCheckWrite32(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	store32_count++;
+#endif
+	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+	if (CHECKBOUNDARY(location, 3)) {
+		if (shadow[1]==old_shadowval)
+			shadow[1] = new_shadowval;
+		else goto L1;
+		if (shadow[2]==old_shadowval)
+			shadow[2] = new_shadowval;
+		else goto L2;
+		if (shadow[3]==old_shadowval)
+			shadow[3] = new_shadowval;
+		else goto L3;
+		return;
+	}
+
+L1:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+L2:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
+L3:
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+}
+
+/**
+ * @brief Race-check a 2-byte write: full check on the first byte,
+ * shadow fast-path or individual check on the second.
+ */
+void raceCheckWrite16(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	store16_count++;
+#endif
+
+	uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+	if (CHECKBOUNDARY(location, 1)) {
+		if (shadow[1]==old_shadowval) {
+			shadow[1] = new_shadowval;
+			return;
+		}
+	}
+	raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+}
+
+/** @brief Race-check a 1-byte write; the shadow out-params are unused here. */
+void raceCheckWrite8(thread_id_t thread, const void *location)
+{
+	uint64_t old_shadowval, new_shadowval;
+	old_shadowval = new_shadowval = INVALIDSHADOWVAL;
+#ifdef COLLECT_STAT
+	store8_count++;
+#endif
+	raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+}
+
+#ifdef COLLECT_STAT
+/** @brief Dump the per-width counters of non-atomic loads/stores
+ *  collected when COLLECT_STAT is enabled. */
+void print_normal_accesses()
+{
+	model_print("store 8  count: %u\n", store8_count);
+	model_print("store 16 count: %u\n", store16_count);
+	model_print("store 32 count: %u\n", store32_count);
+	model_print("store 64 count: %u\n", store64_count);
+
+	model_print("load 8  count: %u\n", load8_count);
+	model_print("load 16 count: %u\n", load16_count);
+	model_print("load 32 count: %u\n", load32_count);
+	model_print("load 64 count: %u\n", load64_count);
+}
+#endif
void raceCheckWrite(thread_id_t thread, void *location);
void atomraceCheckWrite(thread_id_t thread, void *location);
void raceCheckRead(thread_id_t thread, const void *location);
+
void atomraceCheckRead(thread_id_t thread, const void *location);
void recordWrite(thread_id_t thread, void *location);
void recordCalloc(void *location, size_t size);
void setAtomicStoreFlag(const void *location);
void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock);
+void raceCheckRead8(thread_id_t thread, const void *location);
+void raceCheckRead16(thread_id_t thread, const void *location);
+void raceCheckRead32(thread_id_t thread, const void *location);
+void raceCheckRead64(thread_id_t thread, const void *location);
+
+void raceCheckWrite8(thread_id_t thread, const void *location);
+void raceCheckWrite16(thread_id_t thread, const void *location);
+void raceCheckWrite32(thread_id_t thread, const void *location);
+void raceCheckWrite64(thread_id_t thread, const void *location);
+
+#ifdef COLLECT_STAT
+void print_normal_accesses();
+#endif
+
/**
* @brief A record of information for detecting data races
*/
#define MAXREADVECTOR (READMASK-1)
#define MAXWRITEVECTOR (WRITEMASK-1)
+#define INVALIDSHADOWVAL 0x2ULL
+#define CHECKBOUNDARY(location, bits) ((((uintptr_t)location & MASK16BIT) + bits) <= MASK16BIT)
+
typedef HashSet<struct DataRace *, uintptr_t, 0, model_malloc, model_calloc, model_free, race_hash, race_equals> RaceSet;
#endif /* __DATARACE_H__ */
#define INITIAL_THREAD_ID 0
+#ifdef COLLECT_STAT
+static unsigned int atomic_load_count = 0;
+static unsigned int atomic_store_count = 0;
+static unsigned int atomic_rmw_count = 0;
+
+static unsigned int atomic_fence_count = 0;
+static unsigned int atomic_lock_count = 0;
+static unsigned int atomic_trylock_count = 0;
+static unsigned int atomic_unlock_count = 0;
+static unsigned int atomic_notify_count = 0;
+static unsigned int atomic_wait_count = 0;
+static unsigned int atomic_timedwait_count = 0;
+#endif
+
/**
* Structure for holding small ModelChecker members that should be snapshotted
*/
scheduler(scheduler),
thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
- pthread_counter(1),
+ pthread_counter(2),
action_trace(),
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
+ obj_wr_thrd_map(),
+ obj_last_sc_map(),
mutex_map(),
+ cond_map(),
thrd_last_action(1),
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
return model->get_execution_number();
}
-static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
+// Get-or-create idiom: return the per-location vector of action lists for
+// ptr, allocating and registering an empty vector on first use so callers
+// never see NULL.
+static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
{
-	action_list_t *tmp = hash->get(ptr);
+	SnapVector<action_list_t> *tmp = hash->get(ptr);
	if (tmp == NULL) {
-		tmp = new action_list_t();
+		tmp = new SnapVector<action_list_t>();
		hash->put(ptr, tmp);
	}
	return tmp;
}
-static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
+// Get-or-create idiom: return the simple action list stored for ptr,
+// allocating and registering an empty list on first use.
+static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
{
-	SnapVector<action_list_t> *tmp = hash->get(ptr);
+	simple_action_list_t *tmp = hash->get(ptr);
	if (tmp == NULL) {
-		tmp = new SnapVector<action_list_t>();
+		tmp = new simple_action_list_t();
		hash->put(ptr, tmp);
	}
	return tmp;
}
+// Get-or-create idiom: return the per-location vector of simple action
+// lists (one list per thread) for ptr, allocating an empty vector on
+// first use.
+static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
+{
+	SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
+	if (tmp == NULL) {
+		tmp = new SnapVector<simple_action_list_t>();
+		hash->put(ptr, tmp);
+	}
+	return tmp;
+}
+
+/**
+ * When vectors of action lists are reallocated due to resize, the root address of
+ * action lists may change. Hence we need to fix the parent pointer of the children
+ * of root.
+ */
+static void fixup_action_list(SnapVector<action_list_t> * vec)
+{
+	for (uint i = 0;i < vec->size();i++) {
+		// &(*vec)[i] is the address of an in-place element and can never be
+		// NULL, so the former "list != NULL" guard was dead code; call
+		// fixupParent() unconditionally.
+		(*vec)[i].fixupParent();
+	}
+}
+
+#ifdef COLLECT_STAT
+/**
+ * @brief Bump the statistics counter matching the type of an atomic action.
+ *
+ * Fix: the previous version incremented atomic_load_count for ATOMIC_RMW
+ * and atomic_rmw_count for ATOMIC_READ — the two cases were swapped.  It
+ * also let ATOMIC_TIMEDWAIT fall through into default; an explicit break
+ * makes the intent clear (same behavior, since default only returns).
+ *
+ * @param act The atomic action whose type selects the counter to bump.
+ */
+static inline void record_atomic_stats(ModelAction * act)
+{
+	switch (act->get_type()) {
+	case ATOMIC_WRITE:
+		atomic_store_count++;
+		break;
+	case ATOMIC_RMW:
+		atomic_rmw_count++;
+		break;
+	case ATOMIC_READ:
+		atomic_load_count++;
+		break;
+	case ATOMIC_FENCE:
+		atomic_fence_count++;
+		break;
+	case ATOMIC_LOCK:
+		atomic_lock_count++;
+		break;
+	case ATOMIC_TRYLOCK:
+		atomic_trylock_count++;
+		break;
+	case ATOMIC_UNLOCK:
+		atomic_unlock_count++;
+		break;
+	case ATOMIC_NOTIFY_ONE:
+	case ATOMIC_NOTIFY_ALL:
+		atomic_notify_count++;
+		break;
+	case ATOMIC_WAIT:
+		atomic_wait_count++;
+		break;
+	case ATOMIC_TIMEDWAIT:
+		atomic_timedwait_count++;
+		break;
+	default:
+		return;
+	}
+}
+
+// Print the totals accumulated by record_atomic_stats() for each category
+// of atomic operation observed during the run.
+void print_atomic_accesses()
+{
+	model_print("atomic store count: %u\n", atomic_store_count);
+	model_print("atomic load count: %u\n", atomic_load_count);
+	model_print("atomic rmw count: %u\n", atomic_rmw_count);
+
+	model_print("atomic fence count: %u\n", atomic_fence_count);
+	model_print("atomic lock count: %u\n", atomic_lock_count);
+	model_print("atomic trylock count: %u\n", atomic_trylock_count);
+	model_print("atomic unlock count: %u\n", atomic_unlock_count);
+	model_print("atomic notify count: %u\n", atomic_notify_count);
+	model_print("atomic wait count: %u\n", atomic_wait_count);
+	model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
+}
+#endif
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
read_from(curr, rf);
get_thread(curr)->set_return_value(curr->get_return_value());
delete priorset;
+ //Update acquire fence clock vector
+ ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
+ if (hbcv != NULL)
+ get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
return canprune && (curr->get_type() == ATOMIC_READ);
}
priorset->clear();
//TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
//if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
// assert_bug("Lock access before initialization");
+
+ // TODO: lock count for recursive mutexes
state->locked = get_thread(curr);
ModelAction *unlock = get_last_unlock(curr);
//synchronize with the previous unlock statement
/* unlock the lock - after checking who was waiting on it */
state->locked = NULL;
- /* disable this thread */
- get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
+ /* remove old wait action and disable this thread */
+ simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ for (sllnode<ModelAction *> * it = waiters->begin();it != NULL;it = it->getNext()) {
+ ModelAction * wait = it->getVal();
+ if (wait->get_tid() == curr->get_tid()) {
+ waiters->erase(it);
+ break;
+ }
+ }
+
+ waiters->push_back(curr);
scheduler->sleep(get_thread(curr));
}
//FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
//MUST EVENMTUALLY RELEASE...
+ // TODO: lock count for recursive mutexes
/* wake up the other threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
break;
}
case ATOMIC_NOTIFY_ALL: {
- action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
scheduler->wake(get_thread(rit->getVal()));
break;
}
case ATOMIC_NOTIFY_ONE: {
- action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
if (waiters->size() != 0) {
Thread * thread = fuzzer->selectNotify(waiters);
scheduler->wake(thread);
* @param curr The ModelAction to process
* @return True if synchronization was updated
*/
-bool ModelExecution::process_fence(ModelAction *curr)
+// Now returns void: acquire synchronization is applied unconditionally from
+// the thread's incrementally-maintained acquire-fence clock vector (built
+// up as reads execute), replacing the old backwards walk over the whole
+// action trace.
+void ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
-	bool updated = false;
	if (curr->is_acquire()) {
-		action_list_t *list = &action_trace;
-		sllnode<ModelAction *> * rit;
-		/* Find X : is_read(X) && X --sb-> curr */
-		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
-			ModelAction *act = rit->getVal();
-			if (act == curr)
-				continue;
-			if (act->get_tid() != curr->get_tid())
-				continue;
-			/* Stop at the beginning of the thread */
-			if (act->is_thread_start())
-				break;
-			/* Stop once we reach a prior fence-acquire */
-			if (act->is_fence() && act->is_acquire())
-				break;
-			if (!act->is_read())
-				continue;
-			/* read-acquire will find its own release sequences */
-			if (act->is_acquire())
-				continue;
-
-			/* Establish hypothetical release sequences */
-			ClockVector *cv = get_hb_from_write(act->get_reads_from());
-			if (cv != NULL && curr->get_cv()->merge(cv))
-				updated = true;
-		}
+		// O(1): merge the clock vector accumulated at prior reads in this
+		// thread (see process_read) instead of rescanning the trace.
+		curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
	}
-	return updated;
}
/**
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- break; // WL: to be add (modified)
+ break;
}
case THREADONLY_FINISH:
if (curr->is_lock()) {
cdsc::mutex *lock = curr->get_mutex();
struct cdsc::mutex_state *state = lock->get_state();
- if (state->locked)
+ if (state->locked) {
+ Thread *lock_owner = (Thread *)state->locked;
+ Thread *curr_thread = get_thread(curr);
+ if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
+ return true;
+ }
+
return false;
+ }
} else if (curr->is_thread_join()) {
Thread *blocking = curr->get_thread_operand();
if (!blocking->is_complete()) {
ASSERT(rf_set == NULL);
/* Add the action to lists */
- if (!second_part_of_rmw)
+ if (!second_part_of_rmw) {
+#ifdef COLLECT_STAT
+ record_atomic_stats(curr);
+#endif
add_action_to_lists(curr, canprune);
+ }
if (curr->is_write())
add_write_to_lists(curr);
thrd_lists->resize(priv->next_thread_id);
for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*thrd_lists)[i]) action_list_t();
+
+ fixup_action_list(thrd_lists);
}
ModelAction *prev_same_thread = NULL;
//operation that isn't release
if (rf->get_last_fence_release()) {
if (vec == NULL)
- vec = rf->get_last_fence_release()->get_cv();
+ vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
else
(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
+ } else {
+ if (vec == NULL) {
+ if (rf->is_rmw()) {
+ vec = new ClockVector(NULL, NULL);
+ }
+ } else {
+ vec = new ClockVector(vec, NULL);
+ }
}
rf->set_rfcv(vec);
}
{
int tid = id_to_int(act->get_tid());
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
act->setActionRef(list->add_back(act));
}
// Update action trace, a total order of all actions
- act->setTraceRef(action_trace.add_back(act));
+ action_trace.addAction(act);
// Update obj_thrd_map, a per location, per thread, order of actions
vec->resize(priv->next_thread_id);
for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
+
+ fixup_action_list(vec);
}
if (!canprune && (act->is_read() || act->is_write()))
- act->setThrdMapRef((*vec)[tid].add_back(act));
+ (*vec)[tid].addAction(act);
// Update thrd_last_action, the last action taken by each thread
if ((int)thrd_last_action.size() <= tid)
}
}
-sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
-	sllnode<ModelAction*> * rit = list->end();
-	modelclock_t next_seq = act->get_seq_number();
-	if (rit == NULL || (rit->getVal()->get_seq_number() <= next_seq))
-		return list->add_back(act);
-	else {
-		for(;rit != NULL;rit=rit->getPrev()) {
-			if (rit->getVal()->get_seq_number() <= next_seq) {
-				return list->insertAfter(rit, act);
-			}
-		}
-		return list->add_front(act);
-	}
+// The new actionlist type handles ordered insertion itself, so the old
+// manual backwards search for the insertion point is no longer needed;
+// the return value was dropped along with the explicit list node.
+void insertIntoActionList(action_list_t *list, ModelAction *act) {
+	list->addAction(act);
}
-sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
-	sllnode<ModelAction*> * rit = list->end();
-	modelclock_t next_seq = act->get_seq_number();
-	if (rit == NULL) {
-		act->create_cv(NULL);
-		return list->add_back(act);
-	} else if (rit->getVal()->get_seq_number() <= next_seq) {
-		act->create_cv(rit->getVal());
-		return list->add_back(act);
-	} else {
-		for(;rit != NULL;rit=rit->getPrev()) {
-			if (rit->getVal()->get_seq_number() <= next_seq) {
-				act->create_cv(rit->getVal());
-				return list->insertAfter(rit, act);
-			}
-		}
-		return list->add_front(act);
-	}
+// Create the action's clock vector, then insert via addAction() which
+// orders entries internally.
+// NOTE(review): the old code seeded create_cv() with the seq-number
+// predecessor; the new code always passes NULL — confirm callers no
+// longer depend on the predecessor-seeded clock vector.
+void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+	act->create_cv(NULL);
+	list->addAction(act);
}
/**
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
{
int tid = id_to_int(act->get_tid());
- act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
+ insertIntoActionListAndSetCV(&action_trace, act);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
vec->resize(priv->next_thread_id);
for(uint i=oldsize;i<priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
+
+ fixup_action_list(vec);
}
- act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
+ insertIntoActionList(&(*vec)[tid],act);
ModelAction * lastact = thrd_last_action[tid];
// Update thrd_last_action, the last action taken by each thrad
void ModelExecution::add_write_to_lists(ModelAction *write) {
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
+ SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
int tid = id_to_int(write->get_tid());
if (tid >= (int)vec->size()) {
uint oldsize =vec->size();
vec->resize(priv->next_thread_id);
for(uint i=oldsize;i<priv->next_thread_id;i++)
- new (&(*vec)[i]) action_list_t();
+ new (&(*vec)[i]) simple_action_list_t();
}
write->setActionRef((*vec)[tid].add_back(write));
}
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
/* All fences should have location FENCE_LOCATION */
- action_list_t *list = obj_map.get(FENCE_LOCATION);
+ simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
if (!list)
return NULL;
{
void *location = curr->get_location();
- action_list_t *list = obj_map.get(location);
+ simple_action_list_t *list = obj_map.get(location);
if (list == NULL)
return NULL;
*/
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
- SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
+ SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
unsigned int i;
ASSERT(curr->is_read());
if (thrd_lists != NULL)
for (i = 0;i < thrd_lists->size();i++) {
/* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
+ simple_action_list_t *list = &(*thrd_lists)[i];
sllnode<ModelAction *> * rit;
for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
ModelAction *act = rit->getVal();
}
+// Print the last ~25 actions of the current execution trace in program
+// order, followed by a hash of the printed suffix (useful for quickly
+// comparing executions).  Collects the tail by walking backwards, then
+// prints it forwards.
+void ModelExecution::print_tail()
+{
+	model_print("Execution trace %d:\n", get_execution_number());
+
+	sllnode<ModelAction*> *it;
+
+	model_print("------------------------------------------------------------------------------------\n");
+	model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
+	model_print("------------------------------------------------------------------------------------\n");
+
+	unsigned int hash = 0;
+
+	int length = 25;
+	int counter = 0;
+	SnapList<ModelAction *> list;
+	// NOTE(review): "counter > length" breaks only after 26 pushes, so up
+	// to 26 actions are printed for length = 25 — confirm the off-by-one
+	// is intended.
+	for (it = action_trace.end();it != NULL;it = it->getPrev()) {
+		if (counter > length)
+			break;
+
+		ModelAction * act = it->getVal();
+		list.push_front(act);
+		counter++;
+	}
+
+	for (it = list.begin();it != NULL;it=it->getNext()) {
+		const ModelAction *act = it->getVal();
+		// Seq number 0 marks actions not (yet) part of the ordered trace;
+		// they are still hashed but not printed.
+		if (act->get_seq_number() > 0)
+			act->print();
+		hash = hash^(hash<<3)^(it->getVal()->hash());
+	}
+	model_print("HASH %u\n", hash);
+	model_print("------------------------------------------------------------------------------------\n");
+}
+
/**
* Add a Thread to the system for the first time. Should only be called once
* per thread.
* @return A Thread reference
*/
Thread * ModelExecution::get_pthread(pthread_t pid) {
+	// pid 1 is reserved for the main thread, pthread ids should start from 2
+	if (pid == 1)
+		return get_thread(pid);
+
+	// NOTE(review): pthread_t is reinterpreted through a union as a 32-bit
+	// id; confirm this matches how ids are assigned at pthread_create time.
	union {
		pthread_t p;
		uint32_t v;
	} x;
	x.p = pid;
	uint32_t thread_id = x.v;
-	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
-	else return NULL;
+
+	if (thread_id < pthread_counter + 1)
+		return pthread_map[thread_id];
+	else
+		return NULL;
}
/**
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
{
/* Do not split atomic RMW */
- if (curr->is_rmwr() && !paused_by_fuzzer(curr))
+ if (curr->is_rmwr())
return get_thread(curr);
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
return NULL;
}
-/** @param act A read atomic action */
-bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
-{
- ASSERT(act->is_read());
-
- // Actions paused by fuzzer have their sequence number reset to 0
- return act->get_seq_number() == 0;
-}
-
/**
* Takes the next step in the execution, if possible.
* @param curr The current step to take
void ModelExecution::removeAction(ModelAction *act) {
{
- sllnode<ModelAction *> * listref = act->getTraceRef();
- if (listref != NULL) {
- action_trace.erase(listref);
- }
+ action_trace.removeAction(act);
}
{
- sllnode<ModelAction *> * listref = act->getThrdMapRef();
- if (listref != NULL) {
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
- (*vec)[act->get_tid()].erase(listref);
- }
+ SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+ (*vec)[act->get_tid()].removeAction(act);
}
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
list->erase(listref);
}
} else if (act->is_wait()) {
} else if (act->is_free()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
+ SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
(*vec)[act->get_tid()].erase(listref);
}
+
//Clear it from last_sc_map
if (obj_last_sc_map.get(act->get_location()) == act) {
obj_last_sc_map.remove(act->get_location());
/** Compute which actions to free. */
void ModelExecution::collectActions() {
+ if (priv->used_sequence_numbers < params->traceminsize)
+ return;
+
//Compute minimal clock vector for all live threads
ClockVector *cvmin = computeMinimalCV();
SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
if (islastact) {
fixupLastAct(act);
}
+
delete act;
continue;
}
modelclock_t tid_clock = cvmin->getClock(act_tid);
if (actseq <= tid_clock) {
removeAction(act);
+ // Remove reference to act from thrd_last_fence_release
+ int thread_id = id_to_int( act->get_tid() );
+ if (thrd_last_fence_release[thread_id] == act) {
+ thrd_last_fence_release[thread_id] = NULL;
+ }
delete act;
continue;
}
delete act;
continue;
}
+ }
- //If we don't delete the action, we should remove references to release fences
- const ModelAction *rel_fence =act->get_last_fence_release();
- if (rel_fence != NULL) {
- modelclock_t relfenceseq = rel_fence->get_seq_number();
- thread_id_t relfence_tid = rel_fence->get_tid();
- modelclock_t tid_clock = cvmin->getClock(relfence_tid);
- //Remove references to irrelevant release fences
- if (relfenceseq <= tid_clock)
- act->set_last_fence_release(NULL);
- }
-
+ //If we don't delete the action, we should remove references to release fences
+ const ModelAction *rel_fence =act->get_last_fence_release();
+ if (rel_fence != NULL) {
+ modelclock_t relfenceseq = rel_fence->get_seq_number();
+ thread_id_t relfence_tid = rel_fence->get_tid();
+ modelclock_t tid_clock = cvmin->getClock(relfence_tid);
+ //Remove references to irrelevant release fences
+ if (relfenceseq <= tid_clock)
+ act->set_last_fence_release(NULL);
}
}
#include <condition_variable>
#include "classlist.h"
-typedef SnapList<ModelAction *> action_list_t;
-
struct PendingFutureValue {
PendingFutureValue(ModelAction *writer, ModelAction *reader) :
writer(writer), reader(reader)
ModelAction *reader;
};
+#ifdef COLLECT_STAT
+void print_atomic_accesses();
+#endif
+
/** @brief The central structure for model-checking */
class ModelExecution {
public:
Thread * take_step(ModelAction *curr);
void print_summary();
+ void print_tail();
#if SUPPORT_MOD_ORDER_DUMP
void dumpGraph(char *filename);
#endif
bool initialize_curr_action(ModelAction **curr);
bool process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set);
void process_write(ModelAction *curr);
- bool process_fence(ModelAction *curr);
+ void process_fence(ModelAction *curr);
bool process_mutex(ModelAction *curr);
void process_thread_action(ModelAction *curr);
void read_from(ModelAction *act, ModelAction *rf);
* to a trace of all actions performed on the object.
* Used only for SC fences, unlocks, & wait.
*/
- HashTable<const void *, action_list_t *, uintptr_t, 2> obj_map;
+ HashTable<const void *, simple_action_list_t *, uintptr_t, 2> obj_map;
/** Per-object list of actions. Maps an object (i.e., memory location)
* to a trace of all actions performed on the object. */
- HashTable<const void *, action_list_t *, uintptr_t, 2> condvar_waiters_map;
+ HashTable<const void *, simple_action_list_t *, uintptr_t, 2> condvar_waiters_map;
/** Per-object list of actions that each thread performed. */
HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> obj_thrd_map;
/** Per-object list of writes that each thread performed. */
- HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> obj_wr_thrd_map;
+ HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> obj_wr_thrd_map;
HashTable<const void *, ModelAction *, uintptr_t, 4> obj_last_sc_map;
Fuzzer * fuzzer;
Thread * action_select_next_thread(const ModelAction *curr) const;
- bool paused_by_fuzzer(const ModelAction * act) const;
bool isfinished;
};
FuncInst::FuncInst(ModelAction *act, FuncNode *func_node) :
single_location(true),
execution_number(0),
- action_marker(0) /* The marker for FuncNode starts from 1 */
+ associated_reads(),
+ thrd_markers()
{
ASSERT(act);
ASSERT(func_node);
return true;
}
-void FuncInst::set_associated_act(ModelAction * act, uint32_t marker)
+// Record the value read by this instruction for a given thread at a given
+// recursion depth ("index"), tagged with the caller's marker so that stale
+// entries left over from earlier invocations can be distinguished later
+// (see get_associated_read).  Both per-thread tables are grown on demand
+// and the gap entries are initialized to VALUE_NONE / marker 0.
+void FuncInst::set_associated_read(thread_id_t tid, int index, uint32_t marker, uint64_t read_val)
{
-	associated_act = act;
-	action_marker = marker;
+	int thread_id = id_to_int(tid);
+
+	// Grow the outer per-thread vectors if this thread is new.
+	if (associated_reads.size() < (uint) thread_id + 1) {
+		int old_size = associated_reads.size();
+		int new_size = thread_id + 1;
+
+		associated_reads.resize(new_size);
+		thrd_markers.resize(new_size);
+
+		for (int i = old_size; i < new_size; i++ ) {
+			associated_reads[i] = new ModelVector<uint64_t>();
+			thrd_markers[i] = new ModelVector<uint32_t>();
+		}
+	}
+
+	// Grow the inner per-depth vectors if this recursion depth is new.
+	ModelVector<uint64_t> * read_values = associated_reads[thread_id];
+	ModelVector<uint32_t> * markers = thrd_markers[thread_id];
+	if (read_values->size() < (uint) index + 1) {
+		int old_size = read_values->size();
+
+		for (int i = old_size; i < index + 1; i++) {
+			read_values->push_back(VALUE_NONE);
+			markers->push_back(0);
+		}
+	}
+
+	(*read_values)[index] = read_val;
+	(*markers)[index] = marker;
}
-ModelAction * FuncInst::get_associated_act(uint32_t marker)
+// Return the value recorded by set_associated_read for this thread and
+// recursion depth, but only if it was stored under the same marker
+// (i.e. during the current invocation); otherwise report VALUE_NONE.
+uint64_t FuncInst::get_associated_read(thread_id_t tid, int index, uint32_t marker)
{
-	if (action_marker == marker)
-		return associated_act;
+	int thread_id = id_to_int(tid);
+
+	// NOTE(review): no bounds check on thread_id/index — assumes
+	// set_associated_read has already run for this tid/depth; confirm.
+	if ( (*thrd_markers[thread_id])[index] == marker)
+		return (*associated_reads[thread_id])[index];
	else
-		return NULL;
+		return VALUE_NONE;
}
/* Search the FuncInst that has the same type as act in the collision list */
#define __FUNCINST_H__
#include "action.h"
+#include "classlist.h"
#include "hashtable.h"
-
-class ModelAction;
+#include "threads-model.h"
typedef ModelList<FuncInst *> func_inst_list_mt;
void set_execution_number(int new_number) { execution_number = new_number; }
int get_execution_number() { return execution_number; }
- void set_associated_act(ModelAction * act, uint32_t marker);
- ModelAction * get_associated_act(uint32_t marker);
+ void set_associated_read(thread_id_t tid, int index, uint32_t marker, uint64_t read_val);
+ uint64_t get_associated_read(thread_id_t tid, int index, uint32_t marker);
void print();
bool single_location;
int execution_number;
- ModelAction * associated_act;
- uint32_t action_marker;
+ ModelVector< ModelVector<uint64_t> * > associated_reads;
+ ModelVector< ModelVector<uint32_t> * > thrd_markers;
/**
* Collisions store a list of FuncInsts with the same position
#include "concretepredicate.h"
#include "model.h"
+#include "execution.h"
+#include "newfuzzer.h"
#include <cmath>
FuncNode::FuncNode(ModelHistory * history) :
+ func_id(0),
+ func_name(NULL),
history(history),
- exit_count(0),
+ inst_counter(1),
marker(1),
+ exit_count(0),
+ thrd_markers(),
+ thrd_recursion_depth(),
func_inst_map(),
inst_list(),
entry_insts(),
- inst_pred_map(128),
- inst_id_map(128),
- loc_act_map(128),
- predicate_tree_position(),
- predicate_leaves(),
- leaves_tmp_storage(),
- weight_debug_vec(),
- failed_predicates(),
+ thrd_inst_pred_maps(),
+ thrd_inst_id_maps(),
+ thrd_loc_inst_maps(),
+ likely_null_set(),
+ thrd_predicate_tree_position(),
+ thrd_predicate_trace(),
edge_table(32),
out_edges()
{
predicate_tree_exit->set_depth(MAX_DEPTH);
/* Snapshot data structures below */
- action_list_buffer = new SnapList<action_list_t *>();
read_locations = new loc_set_t();
write_locations = new loc_set_t();
val_loc_map = new HashTable<uint64_t, loc_set_t *, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free, int64_hash>();
/* Reallocate snapshotted memories when new executions start */
void FuncNode::set_new_exec_flag()
{
- action_list_buffer = new SnapList<action_list_t *>();
read_locations = new loc_set_t();
write_locations = new loc_set_t();
val_loc_map = new HashTable<uint64_t, loc_set_t *, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free, int64_hash>();
}
}
-
void FuncNode::add_entry_inst(FuncInst * inst)
{
if (inst == NULL)
entry_insts.push_back(inst);
}
+// Called on each entry into this function by thread tid: push a fresh
+// marker, fresh per-invocation local maps, and predicate-tree traversal
+// state for this (possibly recursive) invocation.
+void FuncNode::function_entry_handler(thread_id_t tid)
+{
+	init_marker(tid);
+	init_local_maps(tid);
+	init_predicate_tree_data_structure(tid);
+}
+
+// Called on each exit from this function by thread tid: pop the
+// per-invocation state pushed by function_entry_handler, wire the current
+// predicate-tree position to the shared exit node if needed, and update
+// the tree weights for this completed invocation.
+void FuncNode::function_exit_handler(thread_id_t tid)
+{
+	int thread_id = id_to_int(tid);
+
+	reset_local_maps(tid);
+
+	// Pop this invocation's recursion-depth entry and marker.
+	thrd_recursion_depth[thread_id]--;
+	thrd_markers[thread_id]->pop_back();
+
+	Predicate * exit_pred = get_predicate_tree_position(tid);
+	if (exit_pred->get_exit() == NULL) {
+		// Exit predicate is unset yet
+		exit_pred->set_exit(predicate_tree_exit);
+	}
+
+	update_predicate_tree_weight(tid);
+	reset_predicate_tree_data_structure(tid);
+
+	exit_count++;
+	//model_print("exit count: %d\n", exit_count);
+
+// print_predicate_tree();
+}
+
/**
* @brief Convert ModelAdtion list to FuncInst list
* @param act_list A list of ModelActions
*/
-void FuncNode::update_tree(action_list_t * act_list)
+void FuncNode::update_tree(ModelAction * act)
{
- if (act_list == NULL || act_list->size() == 0)
+ bool should_process = act->is_read() || act->is_write();
+ if (!should_process)
return;
HashTable<void *, value_set_t *, uintptr_t, 0> * write_history = history->getWriteHistory();
/* build inst_list from act_list for later processing */
- func_inst_list_t inst_list;
- action_list_t rw_act_list;
+// func_inst_list_t inst_list;
- for (sllnode<ModelAction *> * it = act_list->begin();it != NULL;it = it->getNext()) {
- ModelAction * act = it->getVal();
- FuncInst * func_inst = get_inst(act);
- void * loc = act->get_location();
+ FuncInst * func_inst = get_inst(act);
+ void * loc = act->get_location();
- if (func_inst == NULL)
- continue;
+ if (func_inst == NULL)
+ return;
- inst_list.push_back(func_inst);
- bool act_added = false;
+// inst_list.push_back(func_inst);
- if (act->is_write()) {
- rw_act_list.push_back(act);
- act_added = true;
- if (!write_locations->contains(loc)) {
- write_locations->add(loc);
- history->update_loc_wr_func_nodes_map(loc, this);
- }
+ if (act->is_write()) {
+ if (!write_locations->contains(loc)) {
+ write_locations->add(loc);
+ history->update_loc_wr_func_nodes_map(loc, this);
}
+ }
- if (act->is_read()) {
- if (!act_added)
- rw_act_list.push_back(act);
-
- /* If func_inst may only read_from a single location, then:
- *
- * The first time an action reads from some location,
- * import all the values that have been written to this
- * location from ModelHistory and notify ModelHistory
- * that this FuncNode may read from this location.
- */
- if (!read_locations->contains(loc) && func_inst->is_single_location()) {
- read_locations->add(loc);
- value_set_t * write_values = write_history->get(loc);
- add_to_val_loc_map(write_values, loc);
- history->update_loc_rd_func_nodes_map(loc, this);
- }
+ if (act->is_read()) {
+ /* If func_inst may only read_from a single location, then:
+ *
+ * The first time an action reads from some location,
+ * import all the values that have been written to this
+ * location from ModelHistory and notify ModelHistory
+ * that this FuncNode may read from this location.
+ */
+ if (!read_locations->contains(loc) && func_inst->is_single_location()) {
+ read_locations->add(loc);
+ value_set_t * write_values = write_history->get(loc);
+ add_to_val_loc_map(write_values, loc);
+ history->update_loc_rd_func_nodes_map(loc, this);
}
- }
-// model_print("function %s\n", func_name);
-// print_val_loc_map();
+ // Keep a has-been-zero-set record
+ if ( likely_reads_from_null(act) )
+ likely_null_set.put(func_inst, true);
+ }
- update_inst_tree(&inst_list);
- update_predicate_tree(&rw_act_list);
+// update_inst_tree(&inst_list); TODO
-// print_predicate_tree();
+ update_predicate_tree(act);
}
/**
}
}
-void FuncNode::update_predicate_tree(action_list_t * act_list)
+void FuncNode::update_predicate_tree(ModelAction * next_act)
{
- if (act_list == NULL || act_list->size() == 0)
- return;
-
- incr_marker();
- uint32_t inst_counter = 0;
+ thread_id_t tid = next_act->get_tid();
+ int thread_id = id_to_int(tid);
+ uint32_t this_marker = thrd_markers[thread_id]->back();
+ int recursion_depth = thrd_recursion_depth[thread_id];
- // Clear hashtables
- loc_act_map.reset();
- inst_pred_map.reset();
- inst_id_map.reset();
+ loc_inst_map_t * loc_inst_map = thrd_loc_inst_maps[thread_id]->back();
+ inst_pred_map_t * inst_pred_map = thrd_inst_pred_maps[thread_id]->back();
+ inst_id_map_t * inst_id_map = thrd_inst_id_maps[thread_id]->back();
- // Clear the set of leaves encountered in this path
- leaves_tmp_storage.clear();
+ Predicate * curr_pred = get_predicate_tree_position(tid);
+ NewFuzzer * fuzzer = (NewFuzzer *)model->get_execution()->getFuzzer();
+ Predicate * selected_branch = fuzzer->get_selected_child_branch(tid);
- sllnode<ModelAction *> *it = act_list->begin();
- Predicate * curr_pred = predicate_tree_entry;
- while (it != NULL) {
- ModelAction * next_act = it->getVal();
+ bool amended;
+ while (true) {
FuncInst * next_inst = get_inst(next_act);
- next_inst->set_associated_act(next_act, marker);
Predicate * unset_predicate = NULL;
bool branch_found = follow_branch(&curr_pred, next_inst, next_act, &unset_predicate);
// A branch with unset predicate expression is detected
if (!branch_found && unset_predicate != NULL) {
- bool amended = amend_predicate_expr(curr_pred, next_inst, next_act);
+ amended = amend_predicate_expr(curr_pred, next_inst, next_act);
if (amended)
continue;
else {
}
// Detect loops
- if (!branch_found && inst_id_map.contains(next_inst)) {
+ if (!branch_found && inst_id_map->contains(next_inst)) {
FuncInst * curr_inst = curr_pred->get_func_inst();
- uint32_t curr_id = inst_id_map.get(curr_inst);
- uint32_t next_id = inst_id_map.get(next_inst);
+ uint32_t curr_id = inst_id_map->get(curr_inst);
+ uint32_t next_id = inst_id_map->get(next_inst);
if (curr_id >= next_id) {
- Predicate * old_pred = inst_pred_map.get(next_inst);
+ Predicate * old_pred = inst_pred_map->get(next_inst);
Predicate * back_pred = old_pred->get_parent();
- // For updating weights
- leaves_tmp_storage.push_back(curr_pred);
-
// Add to the set of backedges
curr_pred->add_backedge(back_pred);
curr_pred = back_pred;
+
continue;
}
}
continue;
}
- if (next_act->is_write())
+ if (next_act->is_write()) {
curr_pred->set_write(true);
+ }
if (next_act->is_read()) {
/* Only need to store the locations of read actions */
- loc_act_map.put(next_act->get_location(), next_act);
+ loc_inst_map->put(next_act->get_location(), next_inst);
}
- inst_pred_map.put(next_inst, curr_pred);
- if (!inst_id_map.contains(next_inst))
- inst_id_map.put(next_inst, inst_counter++);
+ inst_pred_map->put(next_inst, curr_pred);
+ set_predicate_tree_position(tid, curr_pred);
+
+ if (!inst_id_map->contains(next_inst))
+ inst_id_map->put(next_inst, inst_counter++);
- it = it->getNext();
curr_pred->incr_expl_count();
- }
+ add_predicate_to_trace(tid, curr_pred);
+ if (next_act->is_read())
+ next_inst->set_associated_read(tid, recursion_depth, this_marker, next_act->get_reads_from_value());
- if (curr_pred->get_exit() == NULL) {
- // Exit predicate is unset yet
- curr_pred->set_exit(predicate_tree_exit);
+ break;
}
- leaves_tmp_storage.push_back(curr_pred);
- update_predicate_tree_weight();
+ // Sanity check (disabled): reads should land on the fuzzer-selected branch
+ if (next_act->is_read()) {
+// if (selected_branch != NULL && !amended)
+// ASSERT(selected_branch == curr_pred);
+ }
}
/* Given curr_pred and next_inst, find the branch following curr_pred that
{
/* Check if a branch with func_inst and corresponding predicate exists */
bool branch_found = false;
+ thread_id_t tid = next_act->get_tid();
+
ModelVector<Predicate *> * branches = (*curr_pred)->get_children();
for (uint i = 0;i < branches->size();i++) {
Predicate * branch = (*branches)[i];
/* Only read and rmw actions my have unset predicate expressions */
if (pred_expressions->getSize() == 0) {
predicate_correct = false;
+
if (*unset_predicate == NULL)
*unset_predicate = branch;
else
break;
case EQUALITY:
FuncInst * to_be_compared;
- ModelAction * last_act;
-
to_be_compared = pred_expression->func_inst;
- last_act = to_be_compared->get_associated_act(marker);
- last_read = last_act->get_reads_from_value();
+ last_read = get_associated_read(tid, to_be_compared);
+ if (last_read == VALUE_NONE)
+ predicate_correct = false;
+ // ASSERT(last_read != VALUE_NONE);
+
next_read = next_act->get_reads_from_value();
equality = (last_read == next_read);
if (equality != pred_expression->value)
break;
case NULLITY:
- next_read = next_act->get_reads_from_value();
// TODO: implement likely to be null
- equality = ( (void*) (next_read & 0xffffffff) == NULL);
+ equality = likely_reads_from_null(next_act);
if (equality != pred_expression->value)
predicate_correct = false;
break;
SnapVector<struct half_pred_expr *> * half_pred_expressions)
{
void * loc = next_act->get_location();
+ int thread_id = id_to_int(next_act->get_tid());
+ loc_inst_map_t * loc_inst_map = thrd_loc_inst_maps[thread_id]->back();
if (next_inst->is_read()) {
/* read + rmw */
- if ( loc_act_map.contains(loc) ) {
- ModelAction * last_act = loc_act_map.get(loc);
- FuncInst * last_inst = get_inst(last_act);
+ if ( loc_inst_map->contains(loc) ) {
+ FuncInst * last_inst = loc_inst_map->get(loc);
struct half_pred_expr * expression = new half_pred_expr(EQUALITY, last_inst);
half_pred_expressions->push_back(expression);
} else if ( next_inst->is_single_location() ) {
loc_set_iter * loc_it = loc_may_equal->iterator();
while (loc_it->hasNext()) {
void * neighbor = loc_it->next();
- if (loc_act_map.contains(neighbor)) {
- ModelAction * last_act = loc_act_map.get(neighbor);
- FuncInst * last_inst = get_inst(last_act);
+ if (loc_inst_map->contains(neighbor)) {
+ FuncInst * last_inst = loc_inst_map->get(neighbor);
struct half_pred_expr * expression = new half_pred_expr(EQUALITY, last_inst);
half_pred_expressions->push_back(expression);
delete loc_it;
}
- } else {
- // next_inst is not single location
- uint64_t read_val = next_act->get_reads_from_value();
+ }
- // only infer NULLITY predicate when it is actually NULL.
- if ( (void*)read_val == NULL) {
- struct half_pred_expr * expression = new half_pred_expr(NULLITY, NULL);
- half_pred_expressions->push_back(expression);
- }
+ // next_inst is not single location and has been null
+ bool likely_null = likely_null_set.contains(next_inst);
+ if ( !next_inst->is_single_location() && likely_null ) {
+ struct half_pred_expr * expression = new half_pred_expr(NULLITY, NULL);
+ half_pred_expressions->push_back(expression);
}
} else {
/* Pure writes */
curr_pred->add_child(new_pred);
new_pred->set_parent(curr_pred);
- /* Maintain predicate leaves */
- predicate_leaves.add(new_pred);
- predicate_leaves.remove(curr_pred);
-
/* entry predicates and predicates containing pure write actions
* have no predicate expressions */
if ( curr_pred->is_entry_predicate() )
Predicate * pred= predicates[i];
curr_pred->add_child(pred);
pred->set_parent(curr_pred);
-
- /* Add new predicate leaves */
- predicate_leaves.add(pred);
}
- /* Remove predicate node that has children */
- predicate_leaves.remove(curr_pred);
-
/* Free memories allocated by infer_predicate */
for (uint i = 0;i < half_pred_expressions->size();i++) {
struct half_pred_expr * tmp = (*half_pred_expressions)[i];
}
}
- uint64_t read_val = next_act->get_reads_from_value();
+ bool likely_null = likely_null_set.contains(next_inst);
// only generate NULLITY predicate when it is actually NULL.
- if ( !next_inst->is_single_location() && (void*)read_val == NULL ) {
+ if ( !next_inst->is_single_location() && likely_null ) {
Predicate * new_pred = new Predicate(next_inst);
curr_pred->add_child(new_pred);
delete loc_it;
}
-/* Every time a thread enters a function, set its position to the predicate tree entry */
-void FuncNode::init_predicate_tree_position(thread_id_t tid)
+bool FuncNode::likely_reads_from_null(ModelAction * read)
{
- int thread_id = id_to_int(tid);
- if (predicate_tree_position.size() <= (uint) thread_id)
- predicate_tree_position.resize(thread_id + 1);
+ uint64_t read_val = read->get_reads_from_value();
+ if ( (void *)(read_val & 0xffffffff) == NULL )
+ return true;
- predicate_tree_position[thread_id] = predicate_tree_entry;
+ return false;
}
void FuncNode::set_predicate_tree_position(thread_id_t tid, Predicate * pred)
{
int thread_id = id_to_int(tid);
- predicate_tree_position[thread_id] = pred;
+ ModelVector<Predicate *> * stack = thrd_predicate_tree_position[thread_id];
+ (*stack)[stack->size() - 1] = pred;
}
/* @return The position of a thread in a predicate tree */
Predicate * FuncNode::get_predicate_tree_position(thread_id_t tid)
{
int thread_id = id_to_int(tid);
- return predicate_tree_position[thread_id];
+ return thrd_predicate_tree_position[thread_id]->back();
+}
+
+void FuncNode::add_predicate_to_trace(thread_id_t tid, Predicate * pred)
+{
+ int thread_id = id_to_int(tid);
+ thrd_predicate_trace[thread_id]->back()->push_back(pred);
+}
+
+void FuncNode::init_marker(thread_id_t tid)
+{
+ marker++;
+
+ int thread_id = id_to_int(tid);
+ int old_size = thrd_markers.size();
+
+ if (old_size < thread_id + 1) {
+ thrd_markers.resize(thread_id + 1);
+
+ for (int i = old_size; i < thread_id + 1; i++) {
+ thrd_markers[i] = new ModelVector<uint32_t>();
+ thrd_recursion_depth.push_back(-1);
+ }
+ }
+
+ thrd_markers[thread_id]->push_back(marker);
+ thrd_recursion_depth[thread_id]++;
+}
+
+uint64_t FuncNode::get_associated_read(thread_id_t tid, FuncInst * inst)
+{
+ int thread_id = id_to_int(tid);
+ int recursion_depth = thrd_recursion_depth[thread_id];
+ uint marker = thrd_markers[thread_id]->back();
+
+ return inst->get_associated_read(tid, recursion_depth, marker);
}
-/* Make sure elements of thrd_inst_act_map are initialized properly when threads enter functions */
-void FuncNode::init_inst_act_map(thread_id_t tid)
+/* Make sure elements of maps are initialized properly when threads enter functions */
+void FuncNode::init_local_maps(thread_id_t tid)
{
int thread_id = id_to_int(tid);
- SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
- uint old_size = thrd_inst_act_map->size();
+ int old_size = thrd_loc_inst_maps.size();
+
+ if (old_size < thread_id + 1) {
+ int new_size = thread_id + 1;
- if (thrd_inst_act_map->size() <= (uint) thread_id) {
- uint new_size = thread_id + 1;
- thrd_inst_act_map->resize(new_size);
+ thrd_loc_inst_maps.resize(new_size);
+ thrd_inst_id_maps.resize(new_size);
+ thrd_inst_pred_maps.resize(new_size);
- for (uint i = old_size;i < new_size;i++)
- (*thrd_inst_act_map)[i] = new inst_act_map_t(128);
+ for (int i = old_size; i < new_size; i++) {
+ thrd_loc_inst_maps[i] = new ModelVector<loc_inst_map_t *>;
+ thrd_inst_id_maps[i] = new ModelVector<inst_id_map_t *>;
+ thrd_inst_pred_maps[i] = new ModelVector<inst_pred_map_t *>;
+ }
}
+
+ ModelVector<loc_inst_map_t *> * map = thrd_loc_inst_maps[thread_id];
+ int index = thrd_recursion_depth[thread_id];
+
+ // If there are recursive calls, push more hashtables into the vector.
+ if (map->size() < (uint) index + 1) {
+ thrd_loc_inst_maps[thread_id]->push_back(new loc_inst_map_t(64));
+ thrd_inst_id_maps[thread_id]->push_back(new inst_id_map_t(64));
+ thrd_inst_pred_maps[thread_id]->push_back(new inst_pred_map_t(64));
+ }
+
+ ASSERT(map->size() == (uint) index + 1);
}
-/* Reset elements of thrd_inst_act_map when threads exit functions */
-void FuncNode::reset_inst_act_map(thread_id_t tid)
+/* Reset elements of maps when threads exit functions */
+void FuncNode::reset_local_maps(thread_id_t tid)
{
int thread_id = id_to_int(tid);
- SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+ int index = thrd_recursion_depth[thread_id];
+
+ // When recursive call ends, keep only one hashtable in the vector
+ if (index > 0) {
+ delete thrd_loc_inst_maps[thread_id]->back();
+ delete thrd_inst_id_maps[thread_id]->back();
+ delete thrd_inst_pred_maps[thread_id]->back();
- inst_act_map_t * map = (*thrd_inst_act_map)[thread_id];
- map->reset();
+ thrd_loc_inst_maps[thread_id]->pop_back();
+ thrd_inst_id_maps[thread_id]->pop_back();
+ thrd_inst_pred_maps[thread_id]->pop_back();
+ } else {
+ thrd_loc_inst_maps[thread_id]->back()->reset();
+ thrd_inst_id_maps[thread_id]->back()->reset();
+ thrd_inst_pred_maps[thread_id]->back()->reset();
+ }
}
-void FuncNode::update_inst_act_map(thread_id_t tid, ModelAction * read_act)
+void FuncNode::init_predicate_tree_data_structure(thread_id_t tid)
{
int thread_id = id_to_int(tid);
- SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+ int old_size = thrd_predicate_tree_position.size();
+
+ if (old_size < thread_id + 1) {
+ thrd_predicate_tree_position.resize(thread_id + 1);
+ thrd_predicate_trace.resize(thread_id + 1);
- inst_act_map_t * map = (*thrd_inst_act_map)[thread_id];
- FuncInst * read_inst = get_inst(read_act);
- map->put(read_inst, read_act);
+ for (int i = old_size; i < thread_id + 1; i++) {
+ thrd_predicate_tree_position[i] = new ModelVector<Predicate *>();
+ thrd_predicate_trace[i] = new ModelVector<predicate_trace_t *>();
+ }
+ }
+
+ thrd_predicate_tree_position[thread_id]->push_back(predicate_tree_entry);
+ thrd_predicate_trace[thread_id]->push_back(new predicate_trace_t());
}
-inst_act_map_t * FuncNode::get_inst_act_map(thread_id_t tid)
+void FuncNode::reset_predicate_tree_data_structure(thread_id_t tid)
{
int thread_id = id_to_int(tid);
- SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+ thrd_predicate_tree_position[thread_id]->pop_back();
- return (*thrd_inst_act_map)[thread_id];
+ // Free memories allocated in init_predicate_tree_data_structure
+ delete thrd_predicate_trace[thread_id]->back();
+ thrd_predicate_trace[thread_id]->pop_back();
}
/* Add FuncNodes that this node may follow */
else if (target == this)
return 0;
+ // Be careful with memory
SnapList<FuncNode *> queue;
HashTable<FuncNode *, int, uintptr_t, 0> distances(128);
return -1;
}
-void FuncNode::add_failed_predicate(Predicate * pred)
-{
- failed_predicates.add(pred);
-}
-
-/* Implement quick sort to sort leaves before assigning base scores */
-template<typename _Tp>
-static int partition(ModelVector<_Tp *> * arr, int low, int high)
-{
- unsigned int pivot = (*arr)[high] -> get_depth();
- int i = low - 1;
-
- for (int j = low;j <= high - 1;j ++) {
- if ( (*arr)[j] -> get_depth() < pivot ) {
- i ++;
- _Tp * tmp = (*arr)[i];
- (*arr)[i] = (*arr)[j];
- (*arr)[j] = tmp;
- }
- }
-
- _Tp * tmp = (*arr)[i + 1];
- (*arr)[i + 1] = (*arr)[high];
- (*arr)[high] = tmp;
-
- return i + 1;
-}
-
-/* Implement quick sort to sort leaves before assigning base scores */
-template<typename _Tp>
-static void quickSort(ModelVector<_Tp *> * arr, int low, int high)
-{
- if (low < high) {
- int pi = partition(arr, low, high);
-
- quickSort(arr, low, pi - 1);
- quickSort(arr, pi + 1, high);
- }
-}
-
-void FuncNode::assign_initial_weight()
-{
- PredSetIter * it = predicate_leaves.iterator();
- leaves_tmp_storage.clear();
-
- while (it->hasNext()) {
- Predicate * pred = it->next();
- double weight = 100.0 / sqrt(pred->get_expl_count() + pred->get_fail_count() + 1);
- pred->set_weight(weight);
- leaves_tmp_storage.push_back(pred);
- }
- delete it;
-
- quickSort(&leaves_tmp_storage, 0, leaves_tmp_storage.size() - 1);
-
- // assign scores for internal nodes;
- while ( !leaves_tmp_storage.empty() ) {
- Predicate * leaf = leaves_tmp_storage.back();
- leaves_tmp_storage.pop_back();
-
- Predicate * curr = leaf->get_parent();
- while (curr != NULL) {
- if (curr->get_weight() != 0) {
- // Has been exlpored
- break;
- }
-
- ModelVector<Predicate *> * children = curr->get_children();
- double weight_sum = 0;
- bool has_unassigned_node = false;
-
- for (uint i = 0;i < children->size();i++) {
- Predicate * child = (*children)[i];
-
- // If a child has unassigned weight
- double weight = child->get_weight();
- if (weight == 0) {
- has_unassigned_node = true;
- break;
- } else
- weight_sum += weight;
- }
-
- if (!has_unassigned_node) {
- double average_weight = (double) weight_sum / (double) children->size();
- double weight = average_weight * pow(0.9, curr->get_depth());
- curr->set_weight(weight);
- } else
- break;
-
- curr = curr->get_parent();
- }
- }
-}
-
-void FuncNode::update_predicate_tree_weight()
+void FuncNode::update_predicate_tree_weight(thread_id_t tid)
{
- if (marker == 2) {
- // Predicate tree is initially built
- assign_initial_weight();
- return;
- }
-
- weight_debug_vec.clear();
-
- PredSetIter * it = failed_predicates.iterator();
- while (it->hasNext()) {
- Predicate * pred = it->next();
- leaves_tmp_storage.push_back(pred);
- }
- delete it;
- failed_predicates.reset();
-
- quickSort(&leaves_tmp_storage, 0, leaves_tmp_storage.size() - 1);
- for (uint i = 0;i < leaves_tmp_storage.size();i++) {
- Predicate * pred = leaves_tmp_storage[i];
- double weight = 100.0 / sqrt(pred->get_expl_count() + pred->get_fail_count() + 1);
- pred->set_weight(weight);
- }
-
- // Update weights in internal nodes
- while ( !leaves_tmp_storage.empty() ) {
- Predicate * leaf = leaves_tmp_storage.back();
- leaves_tmp_storage.pop_back();
+ predicate_trace_t * trace = thrd_predicate_trace[id_to_int(tid)]->back();
- Predicate * curr = leaf->get_parent();
- while (curr != NULL) {
- ModelVector<Predicate *> * children = curr->get_children();
- double weight_sum = 0;
- bool has_unassigned_node = false;
+ // Update predicate weights based on predicate trace
+ for (int i = trace->size() - 1; i >= 0; i--) {
+ Predicate * node = (*trace)[i];
+ ModelVector<Predicate *> * children = node->get_children();
+ if (children->size() == 0) {
+ double weight = 100.0 / sqrt(node->get_expl_count() + node->get_fail_count() + 1);
+ node->set_weight(weight);
+ } else {
+ double weight_sum = 0.0;
for (uint i = 0;i < children->size();i++) {
Predicate * child = (*children)[i];
-
double weight = child->get_weight();
- if (weight != 0)
- weight_sum += weight;
- else if ( predicate_leaves.contains(child) ) {
- // If this child is a leaf
- double weight = 100.0 / sqrt(child->get_expl_count() + 1);
- child->set_weight(weight);
- weight_sum += weight;
- } else {
- has_unassigned_node = true;
- weight_debug_vec.push_back(child); // For debugging purpose
- break;
- }
+ weight_sum += weight;
}
- if (!has_unassigned_node) {
- double average_weight = (double) weight_sum / (double) children->size();
- double weight = average_weight * pow(0.9, curr->get_depth());
- curr->set_weight(weight);
- } else
- break;
-
- curr = curr->get_parent();
+ double average_weight = (double) weight_sum / (double) children->size();
+ double weight = average_weight * pow(0.9, node->get_depth());
+ node->set_weight(weight);
}
}
-
- for (uint i = 0;i < weight_debug_vec.size();i++) {
- Predicate * tmp = weight_debug_vec[i];
- ASSERT( tmp->get_weight() != 0 );
- }
}
void FuncNode::print_predicate_tree()
#define MAX_DIST 10
typedef ModelList<FuncInst *> func_inst_list_mt;
+typedef ModelVector<Predicate *> predicate_trace_t;
+
+typedef HashTable<void *, FuncInst *, uintptr_t, 0, model_malloc, model_calloc, model_free> loc_inst_map_t;
+typedef HashTable<FuncInst *, uint32_t, uintptr_t, 0, model_malloc, model_calloc, model_free> inst_id_map_t;
+typedef HashTable<FuncInst *, Predicate *, uintptr_t, 0, model_malloc, model_calloc, model_free> inst_pred_map_t;
+
typedef enum edge_type {
IN_EDGE, OUT_EDGE, BI_EDGE
} edge_type_t;
FuncInst * create_new_inst(ModelAction *act);
FuncInst * get_inst(ModelAction *act);
- HashTable<const char *, FuncInst *, uintptr_t, 4, model_malloc, model_calloc, model_free> * getFuncInstMap() { return &func_inst_map; }
func_inst_list_mt * get_inst_list() { return &inst_list; }
func_inst_list_mt * get_entry_insts() { return &entry_insts; }
void add_entry_inst(FuncInst * inst);
- void update_tree(action_list_t * act_list);
- void update_inst_tree(func_inst_list_t * inst_list);
- void update_predicate_tree(action_list_t * act_list);
- bool follow_branch(Predicate ** curr_pred, FuncInst * next_inst, ModelAction * next_act, Predicate ** unset_predicate);
-
- void incr_exit_count() { exit_count++; }
- uint32_t get_exit_count() { return exit_count; }
+ void function_entry_handler(thread_id_t tid);
+ void function_exit_handler(thread_id_t tid);
- SnapList<action_list_t *> * get_action_list_buffer() { return action_list_buffer; }
+ void update_tree(ModelAction * act);
void add_to_val_loc_map(uint64_t val, void * loc);
void add_to_val_loc_map(value_set_t * values, void * loc);
void update_loc_may_equal_map(void * new_loc, loc_set_t * old_locations);
- void init_predicate_tree_position(thread_id_t tid);
- void set_predicate_tree_position(thread_id_t tid, Predicate * pred);
Predicate * get_predicate_tree_position(thread_id_t tid);
- void init_inst_act_map(thread_id_t tid);
- void reset_inst_act_map(thread_id_t tid);
- void update_inst_act_map(thread_id_t tid, ModelAction * read_act);
- inst_act_map_t * get_inst_act_map(thread_id_t tid);
+ void add_predicate_to_trace(thread_id_t tid, Predicate *pred);
+
+ uint64_t get_associated_read(thread_id_t tid, FuncInst * inst);
void add_out_edge(FuncNode * other);
ModelList<FuncNode *> * get_out_edges() { return &out_edges; }
int compute_distance(FuncNode * target, int max_step = MAX_DIST);
- void add_failed_predicate(Predicate * pred);
-
void print_predicate_tree();
MEMALLOC
Predicate * predicate_tree_entry; // A dummy node whose children are the real entries
Predicate * predicate_tree_exit; // A dummy node
- uint32_t exit_count;
+ uint32_t inst_counter;
uint32_t marker;
+ uint32_t exit_count;
+ ModelVector< ModelVector<uint32_t> *> thrd_markers;
+ ModelVector<int> thrd_recursion_depth; // Recursion depth starts from 0 to match with vector indexes.
- void incr_marker() { marker++; }
+ void init_marker(thread_id_t tid);
/* Use source line number as the key of hashtable, to check if
* atomic operation with this line number has been added or not
func_inst_list_mt entry_insts;
/* Map a FuncInst to the its predicate when updating predicate trees */
- HashTable<FuncInst *, Predicate *, uintptr_t, 0, model_malloc, model_calloc, model_free> inst_pred_map;
+ ModelVector< ModelVector<inst_pred_map_t *> * > thrd_inst_pred_maps;
/* Number FuncInsts to detect loops when updating predicate trees */
- HashTable<FuncInst *, uint32_t, uintptr_t, 0, model_malloc, model_calloc, model_free> inst_id_map;
+ ModelVector< ModelVector<inst_id_map_t *> *> thrd_inst_id_maps;
- /* Delect read actions at the same locations when updating predicate trees */
- HashTable<void *, ModelAction *, uintptr_t, 0, model_malloc, model_calloc, model_free> loc_act_map;
+ /* Detect read actions at the same locations when updating predicate trees */
+ ModelVector< ModelVector<loc_inst_map_t *> *> thrd_loc_inst_maps;
+
+ void init_local_maps(thread_id_t tid);
+ void reset_local_maps(thread_id_t tid);
+
+ void update_inst_tree(func_inst_list_t * inst_list);
+ void update_predicate_tree(ModelAction * act);
+ bool follow_branch(Predicate ** curr_pred, FuncInst * next_inst, ModelAction * next_act, Predicate ** unset_predicate);
void infer_predicates(FuncInst * next_inst, ModelAction * next_act, SnapVector<struct half_pred_expr *> * half_pred_expressions);
void generate_predicates(Predicate * curr_pred, FuncInst * next_inst, SnapVector<struct half_pred_expr *> * half_pred_expressions);
bool amend_predicate_expr(Predicate * curr_pred, FuncInst * next_inst, ModelAction * next_act);
- /* Store action_lists when calls to update_tree are deferred */
- SnapList<action_list_t *> * action_list_buffer;
-
/* Set of locations read by this FuncNode */
loc_set_t * read_locations;
/* Keeps track of locations that may share the same value as key, deduced from val_loc_map */
HashTable<void *, loc_set_t *, uintptr_t, 0> * loc_may_equal_map;
+ HashTable<FuncInst *, bool, uintptr_t, 0, model_malloc, model_calloc, model_free> likely_null_set;
+
+ bool likely_reads_from_null(ModelAction * read);
// value_set_t * values_may_read_from;
- /* Run-time position in the predicate tree for each thread */
- ModelVector<Predicate *> predicate_tree_position;
+ /* Run-time position in the predicate tree for each thread
+ * The inner vector is used to deal with recursive functions. */
+ ModelVector< ModelVector<Predicate *> * > thrd_predicate_tree_position;
+
+ ModelVector< ModelVector<predicate_trace_t *> * > thrd_predicate_trace;
+
+ void set_predicate_tree_position(thread_id_t tid, Predicate * pred);
- PredSet predicate_leaves;
- ModelVector<Predicate *> leaves_tmp_storage;
- ModelVector<Predicate *> weight_debug_vec;
- PredSet failed_predicates;
+ void init_predicate_tree_data_structure(thread_id_t tid);
+ void reset_predicate_tree_data_structure(thread_id_t tid);
/* Store the relation between this FuncNode and other FuncNodes */
HashTable<FuncNode *, edge_type_t, uintptr_t, 0, model_malloc, model_calloc, model_free> edge_table;
/* FuncNodes that follow this node */
ModelList<FuncNode *> out_edges;
- void assign_initial_weight();
- void update_predicate_tree_weight();
+ void update_predicate_tree_weight(thread_id_t tid);
};
#endif /* __FUNCNODE_H__ */
return model->get_thread(curr_tid);
}
-Thread * Fuzzer::selectNotify(action_list_t * waiters) {
+Thread * Fuzzer::selectNotify(simple_action_list_t * waiters) {
int numwaiters = waiters->size();
int random_index = random() % numwaiters;
sllnode<ModelAction*> * it = waiters->begin();
bool Fuzzer::shouldWait(const ModelAction * act)
{
- return random() & 1;
+ return true;
}
virtual bool has_paused_threads() { return false; }
virtual Thread * selectThread(int * threadlist, int numthreads);
- Thread * selectNotify(action_list_t * waiters);
+ Thread * selectNotify(simple_action_list_t * waiters);
bool shouldSleep(const ModelAction *sleep);
bool shouldWake(const ModelAction *sleep);
virtual bool shouldWait(const ModelAction *wait);
*/
_Val remove(_Key key) {
struct hashlistnode<_Key, _Val> *search;
+ struct hashlistnode<_Key, _Val> *replace;
/* HashTable cannot handle 0 as a key */
if (!key) {
if (!search->key) {
if (!search->val)
break;
- } else
- if (equals(search->key, key)) {
- _Val v=search->val;
- //empty out this bin
- search->val=(_Val) 1;
- search->key=0;
- size--;
- return v;
+ } else {
+ // The case where an item is found
+ if (equals(search->key, key)) {
+ unsigned int j = index;
+ _Val v = search->val;
+ size--;
+
+ // Idea: keep bins contiguous
+ while (true) {
+ search->val = 0;
+ search->key = 0;
+
+ while (true) {
+ j = (j + 1) & capacitymask;
+ replace = &table[j];
+
+ if (!replace->key && !replace->val) {
+ return v;
+ }
+
+ unsigned int hash = hash_function(replace->key) & capacitymask;
+ if (index <= j && index < hash && hash <= j)
+ continue;
+ else if (index > j && (index < hash || hash <= j) )
+ continue;
+ else
+ break;
+ }
+
+ table[index] = table[j];
+ index = j;
+ search = &table[index];
+ }
+ }
}
index++;
} while (true);
/** @brief Constructor */
ModelHistory::ModelHistory() :
func_counter(1), /* function id starts with 1 */
+ last_seq_number(INIT_SEQ_NUMBER),
func_map(),
func_map_rev(),
func_nodes()
loc_rd_func_nodes_map = new HashTable<void *, SnapVector<FuncNode *> *, uintptr_t, 0>();
loc_wr_func_nodes_map = new HashTable<void *, SnapVector<FuncNode *> *, uintptr_t, 0>();
loc_waiting_writes_map = new HashTable<void *, SnapVector<ConcretePredicate *> *, uintptr_t, 0>();
- thrd_func_act_lists = new SnapVector< SnapList<action_list_t *> *>();
thrd_func_list = new SnapVector<func_id_list_t>();
thrd_last_entered_func = new SnapVector<uint32_t>();
thrd_waiting_write = new SnapVector<ConcretePredicate *>();
thrd_wait_obj = new SnapVector<WaitObj *>();
- func_inst_act_maps = new HashTable<uint32_t, SnapVector<inst_act_map_t *> *, int, 0>(128);
}
ModelHistory::~ModelHistory()
if ( thrd_func_list->size() <= id ) {
uint oldsize = thrd_func_list->size();
thrd_func_list->resize( id + 1 );
- thrd_func_act_lists->resize( id + 1 );
for (uint i = oldsize;i < id + 1;i++) {
// push 0 as a dummy function id to a void seg fault
new (&(*thrd_func_list)[i]) func_id_list_t();
(*thrd_func_list)[i].push_back(0);
-
- (*thrd_func_act_lists)[i] = new SnapList<action_list_t *>();
thrd_last_entered_func->push_back(0);
}
}
- SnapList<action_list_t *> * func_act_lists = (*thrd_func_act_lists)[id];
- func_act_lists->push_back( new action_list_t() );
-
uint32_t last_entered_func_id = (*thrd_last_entered_func)[id];
(*thrd_last_entered_func)[id] = func_id;
(*thrd_func_list)[id].push_back(func_id);
resize_func_nodes( func_id + 1 );
FuncNode * func_node = func_nodes[func_id];
- func_node->init_predicate_tree_position(tid);
- func_node->init_inst_act_map(tid);
+ func_node->function_entry_handler(tid);
/* Add edges between FuncNodes */
if (last_entered_func_id != 0) {
}
/* Monitor the statuses of threads waiting for tid */
- monitor_waiting_thread(func_id, tid);
+ // monitor_waiting_thread(func_id, tid);
}
/* @param func_id a non-zero value */
void ModelHistory::exit_function(const uint32_t func_id, thread_id_t tid)
{
uint32_t id = id_to_int(tid);
-
- SnapList<action_list_t *> * func_act_lists = (*thrd_func_act_lists)[id];
uint32_t last_func_id = (*thrd_func_list)[id].back();
if (last_func_id == func_id) {
FuncNode * func_node = func_nodes[func_id];
- func_node->set_predicate_tree_position(tid, NULL);
- func_node->reset_inst_act_map(tid);
-
- action_list_t * curr_act_list = func_act_lists->back();
-
- /* defer the processing of curr_act_list until the function has exits a few times
- * (currently twice) so that more information can be gathered to infer nullity predicates.
- */
- func_node->incr_exit_count();
- if (func_node->get_exit_count() >= 2) {
- SnapList<action_list_t *> * action_list_buffer = func_node->get_action_list_buffer();
- while (action_list_buffer->size() > 0) {
- action_list_t * act_list = action_list_buffer->back();
- action_list_buffer->pop_back();
- func_node->update_tree(act_list);
- }
-
- func_node->update_tree(curr_act_list);
- } else
- func_node->get_action_list_buffer()->push_front(curr_act_list);
+ func_node->function_exit_handler(tid);
(*thrd_func_list)[id].pop_back();
- func_act_lists->pop_back();
} else {
- model_print("trying to exit with a wrong function id\n");
- model_print("--- last_func: %d, func_id: %d\n", last_func_id, func_id);
+ ASSERT(false);
}
//model_print("thread %d exiting func %d\n", tid, func_id);
}
return;
/* Monitor the statuses of threads waiting for tid */
- monitor_waiting_thread_counter(tid);
+ // monitor_waiting_thread_counter(tid);
/* Every write action should be processed, including
* nonatomic writes (which have no position) */
if (func_id == 0 || act->get_position() == NULL)
return;
- SnapList<action_list_t *> * func_act_lists = (*thrd_func_act_lists)[thread_id];
-
- /* The list of actions that thread tid has taken in its current function */
- action_list_t * curr_act_list = func_act_lists->back();
-
- if (skip_action(act, curr_act_list))
+ if (skip_action(act))
return;
- /* Add to curr_inst_list */
- curr_act_list->push_back(act);
-
FuncNode * func_node = func_nodes[func_id];
func_node->add_inst(act);
- if (act->is_read()) {
- func_node->update_inst_act_map(tid, act);
-
- Fuzzer * fuzzer = model->get_execution()->getFuzzer();
- Predicate * selected_branch = ((NewFuzzer *)fuzzer)->get_selected_child_branch(tid);
- func_node->set_predicate_tree_position(tid, selected_branch);
- }
-
- if (act->is_write()) {
- Predicate * curr_pred = func_node->get_predicate_tree_position(tid);
- FuncInst * curr_inst = func_node->get_inst(act);
-
- if (curr_pred) {
- // Follow child
- curr_pred = curr_pred->follow_write_child(curr_inst);
- }
- func_node->set_predicate_tree_position(tid, curr_pred);
- }
+ func_node->update_tree(act);
+ last_seq_number = act->get_seq_number();
}
/* Return the FuncNode given its func_id */
}
}
-SnapVector<inst_act_map_t *> * ModelHistory::getThrdInstActMap(uint32_t func_id)
-{
- ASSERT(func_id != 0);
-
- SnapVector<inst_act_map_t *> * maps = func_inst_act_maps->get(func_id);
- if (maps == NULL) {
- maps = new SnapVector<inst_act_map_t *>();
- func_inst_act_maps->put(func_id, maps);
- }
-
- return maps;
-}
-
-bool ModelHistory::skip_action(ModelAction * act, SnapList<ModelAction *> * curr_act_list)
+bool ModelHistory::skip_action(ModelAction * act)
{
- ASSERT(curr_act_list != NULL);
-
bool second_part_of_rmw = act->is_rmwc() || act->is_rmw();
modelclock_t curr_seq_number = act->get_seq_number();
return true;
/* Skip actions with the same sequence number */
- if (curr_act_list->size() != 0) {
- ModelAction * last_act = curr_act_list->back();
- if (last_act->get_seq_number() == curr_seq_number)
- return true;
- }
+ if (last_seq_number != INIT_SEQ_NUMBER && last_seq_number == curr_seq_number)
+ return true;
/* Skip actions that are paused by fuzzer (sequence number is 0) */
if (curr_seq_number == 0)
func_node->print_predicate_tree();
/*
- func_inst_list_mt * entry_insts = func_node->get_entry_insts();
- model_print("function %s has entry actions\n", func_node->get_func_name());
-
- mllnode<FuncInst*>* it;
- for (it = entry_insts->begin();it != NULL;it=it->getNext()) {
- FuncInst *inst = it->getVal();
- model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
- }
- */
+ func_inst_list_mt * entry_insts = func_node->get_entry_insts();
+ model_print("function %s has entry actions\n", func_node->get_func_name());
+
+ mllnode<FuncInst*>* it;
+ for (it = entry_insts->begin();it != NULL;it=it->getNext()) {
+ FuncInst *inst = it->getVal();
+ model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
+ }
+*/
}
}
#include "hashtable.h"
#include "threads-model.h"
+#define INIT_SEQ_NUMBER 0xffffffff
+
class ModelHistory {
public:
ModelHistory();
void remove_waiting_thread(thread_id_t tid);
void stop_waiting_for_node(thread_id_t self_id, thread_id_t waiting_for_id, FuncNode * target_node);
- SnapVector<inst_act_map_t *> * getThrdInstActMap(uint32_t func_id);
-
void set_new_exec_flag();
void dump_func_node_graph();
void print_func_node();
MEMALLOC
private:
uint32_t func_counter;
+ modelclock_t last_seq_number;
/* Map function names to integer ids */
HashTable<const char *, uint32_t, uintptr_t, 4, model_malloc, model_calloc, model_free> func_map;
HashTable<void *, SnapVector<ConcretePredicate *> *, uintptr_t, 0> * loc_waiting_writes_map;
- /* Keeps track of atomic actions that thread i has performed in some
- * function. Index of SnapVector is thread id. SnapList simulates
- * the call stack.
- */
- SnapVector< SnapList<action_list_t *> *> * thrd_func_act_lists;
-
/* thrd_func_list stores a list of function ids for each thread.
* Each element in thrd_func_list stores the functions that
* thread i has entered and yet to exit from
SnapVector<ConcretePredicate *> * thrd_waiting_write;
SnapVector<WaitObj *> * thrd_wait_obj;
- /* A run-time map from FuncInst to ModelAction per thread, per FuncNode.
- * Manipulated by FuncNode, and needed by NewFuzzer */
- HashTable<uint32_t, SnapVector<inst_act_map_t *> *, int, 0> * func_inst_act_maps;
-
- bool skip_action(ModelAction * act, SnapList<ModelAction *> * curr_act_list);
+ bool skip_action(ModelAction * act);
void monitor_waiting_thread(uint32_t func_id, thread_id_t tid);
void monitor_waiting_thread_counter(thread_id_t tid);
#include "modeltypes.h"
#include "mymemory.h"
+#include "mypthread.h"
namespace cdsc {
struct mutex_state {
void *locked; /* Thread holding the lock */
thread_id_t alloc_tid;
modelclock_t alloc_clock;
- int init; // WL
+ int type;
+ int lock_count;
};
class mutex {
public:
- mutex();
+ mutex(int type = PTHREAD_MUTEX_DEFAULT);
~mutex() {}
void lock();
bool try_lock();
class snapmutex : public mutex {
public:
- snapmutex() : mutex()
+ snapmutex(int type = 0) : mutex(type)
{ }
SNAPSHOTALLOC
};
#include <sched.h>
#include <pthread.h>
+/* pthread mutex types (reference copy of the values in <pthread.h>)
+enum
+{
+	PTHREAD_MUTEX_NORMAL,
+	PTHREAD_MUTEX_RECURSIVE,
+	PTHREAD_MUTEX_ERRORCHECK,
+	PTHREAD_MUTEX_DEFAULT
+};*/
+
typedef void *(*pthread_start_t)(void *);
struct pthread_params {
void *arg;
};
+struct pthread_attr
+{
+ /* Scheduler parameters and priority. */
+ struct sched_param schedparam;
+ int schedpolicy;
+ /* Various flags like detachstate, scope, etc. */
+ int flags;
+ /* Size of guard area. */
+ size_t guardsize;
+ /* Stack handling. */
+ void *stackaddr;
+ size_t stacksize;
+ /* Affinity map. */
+ cpu_set_t *cpuset;
+ size_t cpusetsize;
+};
+
extern "C" {
int user_main(int, char**);
}
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckWrite(tid, addr);
+ raceCheckWrite8(tid, addr);
}
void cds_store16(void *addr)
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckWrite(tid, addr);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
+ raceCheckWrite16(tid, addr);
}
void cds_store32(void *addr)
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckWrite(tid, addr);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 2));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 3));
+ raceCheckWrite32(tid, addr);
}
void cds_store64(void *addr)
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckWrite(tid, addr);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 2));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 3));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 4));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 5));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 6));
- raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 7));
+ raceCheckWrite64(tid, addr);
}
void cds_load8(const void *addr) {
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckRead(tid, addr);
+ raceCheckRead8(tid, addr);
}
void cds_load16(const void *addr) {
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckRead(tid, addr);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 1));
+ raceCheckRead16(tid, addr);
}
void cds_load32(const void *addr) {
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckRead(tid, addr);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 1));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 2));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 3));
+ raceCheckRead32(tid, addr);
}
void cds_load64(const void *addr) {
if (!model)
return;
thread_id_t tid = thread_current()->get_id();
- raceCheckRead(tid, addr);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 1));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 2));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 3));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 4));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 5));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 6));
- raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 7));
+ raceCheckRead64(tid, addr);
}
ASSERT(0);
}
+#include <signal.h>
+
+#define SIGSTACKSIZE 65536
+static void mprot_handle_pf(int sig, siginfo_t *si, void *unused)
+{
+ model_print("Segmentation fault at %p\n", si->si_addr);
+ model_print("For debugging, place breakpoint at: %s:%d\n",
+ __FILE__, __LINE__);
+	print_trace();	// NOTE: not async-signal-safe — trace printing may allocate memory inside the handler
+ while(1)
+ ;
+}
+
+void install_handler() {
+ stack_t ss;
+ ss.ss_sp = model_malloc(SIGSTACKSIZE);
+ ss.ss_size = SIGSTACKSIZE;
+ ss.ss_flags = 0;
+ sigaltstack(&ss, NULL);
+ struct sigaction sa;
+ sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_sigaction = mprot_handle_pf;
+
+ if (sigaction(SIGSEGV, &sa, NULL) == -1) {
+ perror("sigaction(SIGSEGV)");
+ exit(EXIT_FAILURE);
+ }
+
+}
+
/** @brief Constructor */
ModelChecker::ModelChecker() :
/* Initialize default scheduler */
parse_options(¶ms);
initRaceDetector();
/* Configure output redirection for the model-checker */
- redirect_output();
- install_trace_analyses(get_execution());
+ install_handler();
}
/** @brief Destructor */
execution_number ++;
if (more_executions)
reset_to_initial_state();
+
history->set_new_exec_flag();
}
void ModelChecker::startChecker() {
startExecution(get_system_context(), runChecker);
snapshot = take_snapshot();
+
+ install_trace_analyses(get_execution());
+ redirect_output();
initMainThread();
}
#include "classlist.h"
#include "snapshot-interface.h"
-typedef SnapList<ModelAction *> action_list_t;
-
/** @brief Model checker execution stats */
struct execution_stats {
int num_total; /**< @brief Total number of executions */
namespace cdsc {
-mutex::mutex()
+mutex::mutex(int type)
{
state.locked = NULL;
thread_id_t tid = thread_current()->get_id();
state.alloc_tid = tid;
ClockVector *cv = model->get_execution()->get_cv(tid);
state.alloc_clock = cv == NULL ? 0 : cv->getClock(tid);
+
+ // For recursive mutex
+ state.type = type;
+ state.lock_count = 0;
}
void mutex::lock()
thrd_pruned_writes(),
paused_thread_list(),
paused_thread_table(128),
- failed_predicates(32),
dist_info_vec()
{}
int NewFuzzer::selectWrite(ModelAction *read, SnapVector<ModelAction *> * rf_set)
{
-// return random() % rf_set->size();
+ return random() % rf_set->size();
thread_id_t tid = read->get_tid();
int thread_id = id_to_int(tid);
FuncNode * func_node = history->get_curr_func_node(tid);
Predicate * curr_pred = func_node->get_predicate_tree_position(tid);
FuncInst * read_inst = func_node->get_inst(read);
- inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
if (curr_pred != NULL) {
Predicate * selected_branch = NULL;
- if (check_branch_inst(curr_pred, read_inst, inst_act_map, rf_set))
+ if (check_branch_inst(curr_pred, read_inst, rf_set))
selected_branch = selectBranch(tid, curr_pred, read_inst);
else {
// no child of curr_pred matches read_inst, check back edges
while (it->hasNext()) {
curr_pred = it->next();
- if (check_branch_inst(curr_pred, read_inst, inst_act_map, rf_set)) {
+ if (check_branch_inst(curr_pred, read_inst, rf_set)) {
selected_branch = selectBranch(tid, curr_pred, read_inst);
break;
}
delete it;
}
- prune_writes(tid, selected_branch, rf_set, inst_act_map);
+ thrd_selected_child_branch[thread_id] = selected_branch;
+ prune_writes(tid, selected_branch, rf_set);
}
- if (!failed_predicates.isEmpty())
- failed_predicates.reset();
-
thrd_last_read_act[thread_id] = read;
thrd_last_func_inst[thread_id] = read_inst;
}
Predicate * selected_branch = get_selected_child_branch(tid);
FuncNode * func_node = history->get_curr_func_node(tid);
- // Add failed predicate to NewFuzzer and FuncNode
- failed_predicates.put(selected_branch, true);
- func_node->add_failed_predicate(selected_branch);
+ // Increment failure count
selected_branch->incr_fail_count();
+ func_node->add_predicate_to_trace(tid, selected_branch); // For updating predicate weight
//model_print("the %d read action of thread %d at %p is unsuccessful\n", read->get_seq_number(), read_thread->get_id(), read->get_location());
Predicate * curr_pred = selected_branch->get_parent();
FuncInst * read_inst = thrd_last_func_inst[thread_id];
selected_branch = selectBranch(tid, curr_pred, read_inst);
+ thrd_selected_child_branch[thread_id] = selected_branch;
- inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
- prune_writes(tid, selected_branch, rf_set, inst_act_map);
+ prune_writes(tid, selected_branch, rf_set);
ASSERT(selected_branch);
}
* @return False if no child matches read_inst
*/
bool NewFuzzer::check_branch_inst(Predicate * curr_pred, FuncInst * read_inst,
- inst_act_map_t * inst_act_map, SnapVector<ModelAction *> * rf_set)
+ SnapVector<ModelAction *> * rf_set)
{
available_branches_tmp_storage.clear();
}
int index = choose_branch_index(&available_branches_tmp_storage);
- Predicate * random_branch = available_branches_tmp_storage[ index ];
- thrd_selected_child_branch[thread_id] = random_branch;
+ Predicate * selected_branch = available_branches_tmp_storage[ index ];
	/* Remove the chosen branch from vec in case this
	 * branch fails and another one needs to be chosen */
available_branches_tmp_storage[index] = available_branches_tmp_storage.back();
available_branches_tmp_storage.pop_back();
- return random_branch;
+ return selected_branch;
}
/**
*
* @return true if rf_set is pruned
*/
-bool NewFuzzer::prune_writes(thread_id_t tid, Predicate * pred,
- SnapVector<ModelAction *> * rf_set, inst_act_map_t * inst_act_map)
+bool NewFuzzer::prune_writes(thread_id_t tid, Predicate * pred, SnapVector<ModelAction *> * rf_set)
{
if (pred == NULL)
return false;
pruned_writes->clear(); // clear the old pruned_writes set
bool pruned = false;
- uint index = 0;
+ uint rf_index = 0;
- while ( index < rf_set->size() ) {
- ModelAction * write_act = (*rf_set)[index];
+ while ( rf_index < rf_set->size() ) {
+ ModelAction * write_act = (*rf_set)[rf_index];
uint64_t write_val = write_act->get_write_value();
bool no_predicate = false;
- bool satisfy_predicate = check_predicate_expressions(pred_expressions, inst_act_map, write_val, &no_predicate);
+ bool satisfy_predicate = true;
+
+ // Check if the write value satisfies the predicates
+ PredExprSetIter * pred_expr_it = pred_expressions->iterator();
+ while (pred_expr_it->hasNext()) {
+ struct pred_expr * expression = pred_expr_it->next();
+ bool equality;
+
+ switch (expression->token) {
+ case NOPREDICATE:
+ no_predicate = true;
+ break;
+ case EQUALITY:
+ FuncInst * to_be_compared;
+ FuncNode * func_node;
+ uint64_t last_read;
+
+ to_be_compared = expression->func_inst;
+ func_node = history->get_curr_func_node(tid);
+ last_read = func_node->get_associated_read(tid, to_be_compared);
+ ASSERT(last_read != VALUE_NONE);
+
+ equality = (write_val == last_read);
+ if (equality != expression->value)
+ satisfy_predicate = false;
+ break;
+ case NULLITY:
+ // TODO: implement likely to be null
+ equality = ((void*) (write_val & 0xffffffff) == NULL);
+ if (equality != expression->value)
+ satisfy_predicate = false;
+ break;
+ default:
+ model_print("unknown predicate token\n");
+ break;
+ }
+
+ if (!satisfy_predicate)
+ break;
+ }
+ delete pred_expr_it;
if (no_predicate)
return false;
if (!satisfy_predicate) {
ASSERT(rf_set != NULL);
- (*rf_set)[index] = rf_set->back();
+ (*rf_set)[rf_index] = rf_set->back();
rf_set->pop_back();
pruned_writes->push_back(write_act);
pruned = true;
} else
- index++;
+ rf_index++;
}
return pruned;
*/
void NewFuzzer::conditional_sleep(Thread * thread)
{
+/*
int index = paused_thread_list.size();
model->getScheduler()->add_sleep(thread);
paused_thread_list.push_back(thread);
paused_thread_table.put(thread, index); // Update table
- /* Add the waiting condition to ModelHistory */
+ // Add the waiting condition to ModelHistory
ModelAction * read = thread->get_pending();
thread_id_t tid = thread->get_id();
FuncNode * func_node = history->get_curr_func_node(tid);
- inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
+// inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
Predicate * selected_branch = get_selected_child_branch(tid);
- ConcretePredicate * concrete = selected_branch->evaluate(inst_act_map, tid);
+// ConcretePredicate * concrete = selected_branch->evaluate(inst_act_map, tid);
concrete->set_location(read->get_location());
- history->add_waiting_write(concrete);
- /* history->add_waiting_thread is already called in find_threads */
+ ASSERT(false);
+
+// history->add_waiting_write(concrete);
+ // history->add_waiting_thread is already called in find_threads
+*/
}
bool NewFuzzer::has_paused_threads()
wake_up_paused_threads(threadlist, &numthreads);
//model_print("list size: %d, active t id: %d\n", numthreads, threadlist[0]);
}
-
int random_index = random() % numthreads;
int thread = threadlist[random_index];
thread_id_t curr_tid = int_to_id(thread);
return finds_waiting_for;
}
-bool NewFuzzer::check_predicate_expressions(PredExprSet * pred_expressions,
- inst_act_map_t * inst_act_map, uint64_t write_val, bool * no_predicate)
-{
- bool satisfy_predicate = true;
-
- PredExprSetIter * pred_expr_it = pred_expressions->iterator();
- while (pred_expr_it->hasNext()) {
- struct pred_expr * expression = pred_expr_it->next();
- bool equality;
-
- switch (expression->token) {
- case NOPREDICATE:
- *no_predicate = true;
- break;
- case EQUALITY:
- FuncInst * to_be_compared;
- ModelAction * last_act;
- uint64_t last_read;
-
- to_be_compared = expression->func_inst;
- last_act = inst_act_map->get(to_be_compared);
- last_read = last_act->get_reads_from_value();
-
- equality = (write_val == last_read);
- if (equality != expression->value)
- satisfy_predicate = false;
- break;
- case NULLITY:
- // TODO: implement likely to be null
- equality = ((void*) (write_val & 0xffffffff) == NULL);
- if (equality != expression->value)
- satisfy_predicate = false;
- break;
- default:
- model_print("unknown predicate token\n");
- break;
- }
-
- if (!satisfy_predicate)
- break;
- }
-
- delete pred_expr_it;
- return satisfy_predicate;
-}
-
bool NewFuzzer::shouldWait(const ModelAction * act)
{
- return random() & 1;
+ return true;
}
void notify_paused_thread(Thread * thread);
Thread * selectThread(int * threadlist, int numthreads);
- Thread * selectNotify(action_list_t * waiters);
+ Thread * selectNotify(simple_action_list_t * waiters);
bool shouldSleep(const ModelAction * sleep);
bool shouldWake(const ModelAction * sleep);
bool shouldWait(const ModelAction * wait);
SnapVector<Predicate *> thrd_selected_child_branch;
SnapVector< SnapVector<ModelAction *> *> thrd_pruned_writes;
- bool check_branch_inst(Predicate * curr_pred, FuncInst * read_inst, inst_act_map_t * inst_act_map, SnapVector<ModelAction *> * rf_set);
+ bool check_branch_inst(Predicate * curr_pred, FuncInst * read_inst, SnapVector<ModelAction *> * rf_set);
Predicate * selectBranch(thread_id_t tid, Predicate * curr_pred, FuncInst * read_inst);
- bool prune_writes(thread_id_t tid, Predicate * pred, SnapVector<ModelAction *> * rf_set, inst_act_map_t * inst_act_map);
+ bool prune_writes(thread_id_t tid, Predicate * pred, SnapVector<ModelAction *> * rf_set);
int choose_branch_index(SnapVector<Predicate *> * branches);
	/* The set of Threads put to sleep by NewFuzzer because no write in rf_set satisfies the selected predicate. Only used by selectWrite.
*/
SnapVector<Thread *> paused_thread_list; //-- (not in use)
HashTable<Thread *, int, uintptr_t, 0> paused_thread_table; //--
- HashTable<Predicate *, bool, uintptr_t, 0> failed_predicates;
SnapVector<struct node_dist_info> dist_info_vec; //--
void wake_up_paused_threads(int * threadlist, int * numthreads); //--
bool find_threads(ModelAction * pending_read); //--
-
- bool check_predicate_expressions(PredExprSet * pred_expressions, inst_act_map_t * inst_act_map, uint64_t write_val, bool * no_predicate);
};
#endif /* end of __NEWFUZZER_H__ */
exit_predicate(is_exit),
does_write(false),
depth(0),
- weight(0),
+ weight(100),
exploration_count(0),
store_visible_count(0),
total_checking_count(0),
delete it;
}
-/* Follow the child if any child whose FuncInst matches with inst
- *
- * @param inst must be an ATOMIC_WRITE FuncInst
- * @return NULL if no such child is found.
- */
-Predicate * Predicate::follow_write_child(FuncInst * inst)
-{
- ASSERT(inst->get_type() == ATOMIC_WRITE);
-
- for (uint i = 0;i < children.size();i++) {
- Predicate * child = children[i];
- if (child->get_func_inst() == inst)
- return child;
- }
-
- return NULL;
-}
-
/* Evaluate predicate expressions against the given inst_act_map */
-ConcretePredicate * Predicate::evaluate(inst_act_map_t * inst_act_map, thread_id_t tid)
+ConcretePredicate * Predicate::evaluate(thread_id_t tid)
{
+ /*
ConcretePredicate * concrete = new ConcretePredicate(tid);
PredExprSetIter * it = pred_expressions.iterator();
delete it;
return concrete;
+ */
+
+ return NULL;
}
void Predicate::print_predicate()
bool is_write() { return does_write; }
void set_write(bool is_write) { does_write = is_write; }
- ConcretePredicate * evaluate(inst_act_map_t * inst_act_map, thread_id_t tid);
+ ConcretePredicate * evaluate(thread_id_t tid);
uint32_t get_expl_count() { return exploration_count; }
uint32_t get_fail_count() { return failure_count; }
real_pthread_exit(NULL);
}
-int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
+int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t * attr) {
if (!model) {
snapshot_system_init(10000, 1024, 1024, 40000);
model = new ModelChecker();
model->startChecker();
}
- cdsc::snapmutex *m = new cdsc::snapmutex();
+
+ int mutex_type = PTHREAD_MUTEX_DEFAULT;
+ if (attr != NULL)
+ pthread_mutexattr_gettype(attr, &mutex_type);
+
+ cdsc::snapmutex *m = new cdsc::snapmutex(mutex_type);
ModelExecution *execution = model->get_execution();
execution->getMutexMap()->put(p_mutex, m);
}
pthread_t pthread_self() {
+ if (!model) {
+ snapshot_system_init(10000, 1024, 1024, 40000);
+ model = new ModelChecker();
+ model->startChecker();
+ }
+
Thread* th = model->get_current_thread();
return (pthread_t)th->get_id();
}
}
return 0;
}
+
+/* https://github.com/lattera/glibc/blob/master/nptl/pthread_getattr_np.c */
+int pthread_getattr_np(pthread_t t, pthread_attr_t *attr)
+{
+ ModelExecution *execution = model->get_execution();
+ Thread *th = execution->get_pthread(t);
+
+ struct pthread_attr *iattr = (struct pthread_attr *) attr;
+
+ /* The sizes are subject to alignment. */
+ if (th != NULL) {
+#if _STACK_GROWS_DOWN
+ ASSERT(false);
+#else
+ iattr->stackaddr = (char *) th->get_stack_addr();
+#endif
+
+ } else {
+ ASSERT(false);
+ }
+
+ return 0;
+}
+
+int pthread_setname_np(pthread_t t, const char *name)
+{
+ ModelExecution *execution = model->get_execution();
+ Thread *th = execution->get_pthread(t);
+
+ if (th != NULL)
+ return 0;
+
+ return 1;
+}
+++ /dev/null
-#!/bin/sh
-
-# C test cases
-# clang -Xclang -load -Xclang /scratch/llvm/build/lib/libCDSPass.so -c -I/scratch/random-fuzzer/include/ /scratch/random-fuzzer/test/userprog.c
-
-# gcc -o userprog userprog.o -L/scratch/random-fuzzer -lmodel
-
-
-DIR=/scratch/random-fuzzer/pthread_test/CDSPass/dummy
-
-export LD_LIBRARY_PATH=/scratch/random-fuzzer
-
-# compile with CDSPass
-if [ "$2" != "" ]; then # C++
- clang++ -Xclang -load -Xclang /scratch/llvm/build/lib/libCDSPass.so -g -o $DIR/$1.o -I /scratch/random-fuzzer/include -L/scratch/random-fuzzer -lmodel $1
-else # C
- clang -Xclang -load -Xclang /scratch/llvm/build/lib/libCDSPass.so -o $DIR/$1.o -I/scratch/random-fuzzer/include/ -L/scratch/random-fuzzer -lmodel $1
-fi
+++ /dev/null
-/**
- * @file addr-satcycle.cc
- * @brief Address-based satisfaction cycle test
- *
- * This program has a peculiar behavior which is technically legal under the
- * current C++ memory model but which is a result of a type of satisfaction
- * cycle. We use this as justification for part of our modifications to the
- * memory model when proving our model-checker's correctness.
- */
-
-#include <atomic>
-#include <pthread.h>
-#include <stdio.h>
-
-#include "model-assert.h"
-
-using namespace std;
-
-atomic_int x[2], idx, y;
-
-int r1, r2, r3; /* "local" variables */
-
-static void *a(void *obj)
-{
- r1 = idx.load(memory_order_relaxed);
- x[r1].store(0, memory_order_relaxed);
-
- /* Key point: can we guarantee that &x[0] == &x[r1]? */
- r2 = x[0].load(memory_order_relaxed);
- y.store(r2);
- return NULL;
-}
-
-static void *b(void *obj)
-{
- r3 = y.load(memory_order_relaxed);
- idx.store(r3, memory_order_relaxed);
- return NULL;
-}
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2;
-
- atomic_init(&x[0], 1);
- atomic_init(&idx, 0);
- atomic_init(&y, 0);
-
- printf("Main thread: creating 2 threads\n");
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- printf("Main thread is finished\n");
-
- printf("r1 = %d\n", r1);
- printf("r2 = %d\n", r2);
- printf("r3 = %d\n", r3);
-
- /*
- * This condition should not be hit because it only occurs under a
- * satisfaction cycle
- */
- bool cycle = (r1 == 1 && r2 == 1 && r3 == 1);
- MODEL_ASSERT(!cycle);
-
- return 0;
-}
+++ /dev/null
-/**
- * @file iriw.cc
- * @brief Independent read and independent write test
- */
-
-#include <atomic>
-#include <pthread.h>
-#include <stdio.h>
-
-#define N 14
-//#include "wildcard.h"
-//#include "model-assert.h"
-
-using namespace std;
-
-std::atomic_int x, y;
-int r1, r2, r3, r4; /* "local" variables */
-
-static void *a(void *obj)
-{
-// x.store(1, memory_order_seq_cst);
- return NULL;
-}
-
-
-static void *b(void *obj)
-{
- y.store(1, memory_order_seq_cst);
- return NULL;
-}
-
-static void *c(void *obj)
-{
- r1 = x.load(memory_order_acquire);
- r2 = y.load(memory_order_seq_cst);
- return NULL;
-}
-
-static void *d(void *obj)
-{
- r3 = y.load(memory_order_acquire);
- r4 = x.load(memory_order_seq_cst);
- return NULL;
-}
-
-
-int user_main(int argc, char **argv)
-{
- pthread_t threads[20];
-
- atomic_init(&x, 0);
- atomic_init(&y, 0);
-
- printf("Main thread: creating %d threads\n", N);
-
- for (int i = 0; i< N; i++)
- pthread_create(&threads[i],NULL, &a, NULL);
-
- for (int i=0; i<N; i++)
- printf("thread id: %ld\n", threads[i]);
-
- for (int i = 0; i< N; i++)
- pthread_join( threads[i],NULL);
-
- printf("Main thread is finished\n");
-
- /*
- * This condition should not be hit if the execution is SC */
-// bool sc = (r1 == 1 && r2 == 0 && r3 == 1 && r4 == 0);
-// printf("r1 = %d, r2 = %d, r3 = %d and r4 = %d\n", r1, r2, r3, r4);
-// MODEL_ASSERT(!sc);
-
- return 0;
-}
+++ /dev/null
-#include <stdio.h>
-
-#include "pthread.h"
-#include "librace.h"
-#include "stdatomic.h"
-#include "mutex.h"
-#include <condition_variable>
-
-cdsc::mutex * m;
-cdsc::condition_variable *v;
-int shareddata;
-
-static void *a(void *obj)
-{
- m->lock();
- while(load_32(&shareddata)==0)
- v->wait(*m);
- m->unlock();
- return NULL;
-}
-
-static void *b(void *obj)
-{
- m->lock();
- store_32(&shareddata, (unsigned int) 1);
- v->notify_all();
- m->unlock();
- return NULL;
-}
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2;
- store_32(&shareddata, (unsigned int) 0);
- m=new cdsc::mutex();
- v=new cdsc::condition_variable();
-
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- return 0;
-}
+++ /dev/null
-#include <stdio.h>
-#include <pthread.h>
-
-#include "librace.h"
-
-pthread_mutex_t x;
-pthread_mutex_t y;
-uint32_t shared = 0;
-
-static void *a(void *obj)
-{
- pthread_mutex_lock(&x);
- pthread_mutex_lock(&y);
- printf("shared = %u\n", load_32(&shared));
- pthread_mutex_unlock(&y);
- pthread_mutex_unlock(&x);
- return NULL;
-}
-
-static void *b(void *obj)
-{
- pthread_mutex_lock(&y);
- pthread_mutex_lock(&x);
- store_32(&shared, 16);
- printf("write shared = 16\n");
- pthread_mutex_unlock(&x);
- pthread_mutex_unlock(&y);
- return NULL;
-}
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2;
-
- pthread_mutex_init(&x, NULL);
- pthread_mutex_init(&y, NULL);
-
- printf("Main thread: creating 2 threads\n");
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- printf("Main thread is finished\n");
-
- return 0;
-}
+++ /dev/null
-#include <stdlib.h>
-#include <stdio.h>
-#include <pthread.h>
-#include <atomic>
-
-#include "librace.h"
-#include "model-assert.h"
-
-using namespace std;
-
-atomic_int x, y;
-atomic_intptr_t z, z2;
-
-int r1, r2, r3; /* "local" variables */
-
-/**
- This example illustrates a self-satisfying cycle involving
- synchronization. A failed synchronization creates the store that
- causes the synchronization to fail.
-
- The C++11 memory model nominally allows r1=0, r2=1, r3=5.
-
- This example is insane, we don't support that behavior.
-*/
-
-
-static void *a(void *obj)
-{
- z.store((intptr_t)&y, memory_order_relaxed);
- r1 = y.fetch_add(1, memory_order_release);
- z.store((intptr_t)&x, memory_order_relaxed);
- r2 = y.fetch_add(1, memory_order_release);
- return NULL;
-}
-
-
-static void *b(void *obj)
-{
- r3 = y.fetch_add(1, memory_order_acquire);
- intptr_t ptr = z.load(memory_order_relaxed);
- z2.store(ptr, memory_order_relaxed);
- return NULL;
-}
-
-static void *c(void *obj)
-{
- atomic_int *ptr2 = (atomic_int *)z2.load(memory_order_relaxed);
- (*ptr2).store(5, memory_order_relaxed);
- return NULL;
-}
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2, t3;
-
- atomic_init(&x, 0);
- atomic_init(&y, 0);
- atomic_init(&z, (intptr_t) &x);
- atomic_init(&z2, (intptr_t) &x);
-
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
- pthread_create(&t3,NULL, &c, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- pthread_join(t3,NULL);
-
- printf("r1=%d, r2=%d, r3=%d\n", r1, r2, r3);
-
- return 0;
-}
+++ /dev/null
-/**
- * @file iriw.cc
- * @brief Independent read and independent write test
- */
-
-#include <atomic>
-#include <pthread.h>
-#include <stdio.h>
-
-#include "wildcard.h"
-#include "model-assert.h"
-
-using namespace std;
-
-atomic_int x, y;
-int r1, r2, r3, r4; /* "local" variables */
-
-static void *a(void *obj)
-{
- x.store(1, memory_order_seq_cst);
- return new int(1);
-}
-
-static void *b(void *obj)
-{
- y.store(1, memory_order_seq_cst);
- return new int(2);
-}
-
-static void *c(void *obj)
-{
- r1 = x.load(memory_order_acquire);
- r2 = y.load(memory_order_seq_cst);
- return new int(3);
-}
-
-static void *d(void *obj)
-{
- r3 = y.load(memory_order_acquire);
- r4 = x.load(memory_order_seq_cst);
- return new int(4);
-}
-
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2, t3, t4;
-
- atomic_init(&x, 0);
- atomic_init(&y, 0);
-
- printf("Main thread: creating 4 threads\n");
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
- pthread_create(&t3,NULL, &c, NULL);
- pthread_create(&t4,NULL, &d, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- pthread_join(t3,NULL);
- pthread_join(t4,NULL);
- printf("Main thread is finished\n");
-
- /*
- * This condition should not be hit if the execution is SC */
- bool sc = (r1 == 1 && r2 == 0 && r3 == 1 && r4 == 0);
- printf("r1 = %d, r2 = %d, r3 = %d and r4 = %d\n", r1, r2, r3, r4);
- MODEL_ASSERT(!sc);
-
- return 0;
-}
+++ /dev/null
-/**
- * @file iriw.cc
- * @brief Independent read and independent write test
- */
-
-#include <atomic>
-#include <pthread.h>
-#include <stdio.h>
-
-#include "wildcard.h"
-#include "model-assert.h"
-
-using namespace std;
-
-atomic_int x, y;
-int r1, r2, r3, r4; /* "local" variables */
-
-static void *a(void *obj)
-{
- x.store(1, wildcard(1));
- return NULL;
-}
-
-static void *b(void *obj)
-{
- y.store(1, wildcard(2));
- return NULL;
-}
-
-static void *c(void *obj)
-{
- r1 = x.load(wildcard(3));
- r2 = y.load(wildcard(4));
- return NULL;
-}
-
-static void *d(void *obj)
-{
- r3 = y.load(wildcard(5));
- r4 = x.load(wildcard(6));
- return NULL;
-}
-
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2, t3, t4;
-
- atomic_init(&x, 0);
- atomic_init(&y, 0);
-
- printf("Main thread: creating 4 threads\n");
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
- pthread_create(&t3,NULL, &c, NULL);
- pthread_create(&t4,NULL, &d, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- pthread_join(t3,NULL);
- pthread_join(t4,NULL);
- printf("Main thread is finished\n");
-
- /*
- * This condition should not be hit if the execution is SC */
- bool sc = (r1 == 1 && r2 == 0 && r3 == 1 && r4 == 0);
- //MODEL_ASSERT(!sc);
-
- return 0;
-}
+++ /dev/null
-/**
- * @file mo-satcycle.cc
- * @brief MO satisfaction cycle test
- *
- * This program has a peculiar behavior which is technically legal under the
- * current C++ memory model but which is a result of a type of satisfaction
- * cycle. We use this as justification for part of our modifications to the
- * memory model when proving our model-checker's correctness.
- */
-
-#include <atomic>
-#include <pthread.h>
-#include <stdio.h>
-
-#include "model-assert.h"
-
-using namespace std;
-
-atomic_int x, y;
-int r0, r1, r2, r3; /* "local" variables */
-
-static void *a(void *obj)
-{
- y.store(10, memory_order_relaxed);
- x.store(1, memory_order_release);
- return NULL;
-}
-
-static void *b(void *obj)
-{
- r0 = x.load(memory_order_relaxed);
- r1 = x.load(memory_order_acquire);
- y.store(11, memory_order_relaxed);
- return NULL;
-}
-
-static void *c(void *obj)
-{
- r2 = y.load(memory_order_relaxed);
- r3 = y.load(memory_order_relaxed);
- if (r2 == 11 && r3 == 10)
- x.store(0, memory_order_relaxed);
- return NULL;
-}
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2, t3;
-
- atomic_init(&x, 0);
- atomic_init(&y, 0);
-
- printf("Main thread: creating 3 threads\n");
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
- pthread_create(&t3,NULL, &c, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- pthread_join(t3,NULL);
- printf("Main thread is finished\n");
-
- /*
- * This condition should not be hit because it only occurs under a
- * satisfaction cycle
- */
- bool cycle = (r0 == 1 && r1 == 0 && r2 == 11 && r3 == 10);
- MODEL_ASSERT(!cycle);
-
- return 0;
-}
+++ /dev/null
-#g++ -o test.o test.cc -Wall -g -O3 -I.. -I../include -L.. -lmodel
-
-export LD_LIBRARY_PATH=/scratch/random-fuzzer
-
-if [ "$2" != "" ]; then
- g++ -o "$1.o" $1 -Wall -g -O3 -I.. -I../include -L.. -lmodel
-else
- gcc -o "$1.o" $1 -Wall -g -O3 -I.. -I../include -L.. -lmodel
-fi
+++ /dev/null
-#include <stdio.h>
-
-#include "threads.h"
-#include "librace.h"
-#include "stdatomic.h"
-#include <pthread.h>
-
-int shareddata;
-pthread_mutex_t m;
-
-static void* a(void *obj)
-{
- int i;
- for(i=0;i<2;i++) {
- if ((i%2)==0) {
- pthread_mutex_lock(&m);
- store_32(&shareddata,(unsigned int)i);
- printf("shareddata: %d\n", shareddata);
- pthread_mutex_unlock(&m);
- } else {
- while(!pthread_mutex_trylock(&m))
- thrd_yield();
- store_32(&shareddata,(unsigned int)i);
- printf("shareddata: %d\n", shareddata);
- pthread_mutex_unlock(&m);
- }
- }
-}
-
-int user_main(int argc, char **argv)
-{
- thrd_t t1, t2;
- pthread_mutex_init(&m, NULL);
-
- thrd_create(&t1, (thrd_start_t)&a, NULL);
- thrd_create(&t2, (thrd_start_t)&a, NULL);
-
- thrd_join(t1);
- thrd_join(t2);
- return 0;
-}
+++ /dev/null
-#include <stdio.h>
-#include <pthread.h>
-#define NTHREADS 2
-void *thread_function(void *);
-//pthread_mutex_t mutex1;
-
-int counter=0;
-
-class Box {
-public:
-// int counter;
-
- static Box& getInstance() {
- static Box instance;
- return instance;
- }
-
- void addone() {
- pthread_mutex_lock(&pool_mutex);
- counter++;
- pthread_mutex_unlock(&pool_mutex);
- }
-
-private:
- Box() {
- pthread_mutex_init(&pool_mutex, NULL);
- counter = 0;
- }
- pthread_mutex_t pool_mutex;
-};
-
-int user_main(int argv, char **argc)
-{
-// void *dummy = NULL;
-// pthread_mutex_init(&mutex1, NULL); /* PTHREAD_MUTEX_INITIALIZER;*/
-
-// pthread_t thread_id[NTHREADS];
-// int i, j;
-
- Box::getInstance().addone();
-
-/* for(i=0; i < NTHREADS; i++)
- {
- pthread_create( &thread_id[i], NULL, &thread_function, NULL );
- }
-
- for(j=0; j < NTHREADS; j++)
- {
- pthread_join( thread_id[j], NULL);
- }
-*/
-
- printf("Final counter value: %d\n", counter);
- /*
- for (i=0;i<NTHREADS; i++) {
- printf("id %ld\n", thread_id[i]);
- }*/
- return 0;
-}
-
-void *thread_function(void *dummyPtr)
-{
-// printf("Thread number %ld\n", pthread_self());
- Box::getInstance().addone();
-// pthread_mutex_lock( &mutex1 );
-// Box::getInstance().counter++;
-// pthread_mutex_unlock( &mutex1 );
- return NULL;
-}
+++ /dev/null
-/**
- * @file iriw.cc
- * @brief Independent read and independent write test
- */
-
-#include <atomic>
-#include <pthread.h>
-#include <stdio.h>
-#include <iostream>
-
-#define N 4
-//#include "wildcard.h"
-//#include "model-assert.h"
-
-using namespace std;
-
-atomic<int> x(1);
-atomic<int> y(1);
-
-int r1, r2, r3, r4; /* "local" variables */
-
-static void *a(void *obj)
-{
- x.store(1, memory_order_relaxed);
- y.store(1, memory_order_relaxed);
-
- return new int(1);
-}
-
-
-static void *b(void *obj)
-{
- y.store(1, memory_order_relaxed);
-
- return new int(2);
-}
-
-static void *c(void *obj)
-{
- r1 = x.load(memory_order_acquire);
- r2 = y.load(memory_order_relaxed);
-
- return new int(3);
-}
-
-static void *d(void *obj)
-{
- r3 = y.load(memory_order_acquire);
- r4 = x.load(memory_order_relaxed);
-
- return new int(4);
-}
-
-
-int main(int argc, char **argv)
-{
- printf("Main thread starts\n");
-
- x.store(2, memory_order_relaxed);
- y.store(2, memory_order_relaxed);
-
- r1 = x.load(memory_order_relaxed);
- printf("%d\n", r1);
-
-// x.compare_exchange_strong(r1, r2);
-// r3 = x.load(memory_order_relaxed);
-
-// printf("%d\n", r3);
-
- printf("Main thread is finished\n");
-
- return 0;
-}
+++ /dev/null
-/**
- * @file uninit.cc
- * @brief Uninitialized loads test
- *
- * This is a test of the "uninitialized loads" code. While we don't explicitly
- * initialize y, this example's synchronization pattern should guarantee we
- * never see it uninitialized.
- */
-#include <stdio.h>
-#include <pthread.h>
-#include <atomic>
-
-//#include "librace.h"
-
-std::atomic_int x;
-std::atomic_int y;
-
-static void *a(void *obj)
-{
- int flag = x.load(std::memory_order_acquire);
- printf("flag: %d\n", flag);
- if (flag == 2)
- printf("Load: %d\n", y.load(std::memory_order_relaxed));
- return NULL;
-}
-
-static void *b(void *obj)
-{
- printf("fetch_add: %d\n", x.fetch_add(1, std::memory_order_relaxed));
- return NULL;
-}
-
-static void *c(void *obj)
-{
- y.store(3, std::memory_order_relaxed);
- x.store(1, std::memory_order_release);
- return NULL;
-}
-
-int user_main(int argc, char **argv)
-{
- pthread_t t1, t2, t3;
-
- std::atomic_init(&x, 0);
-
- printf("Main thread: creating 3 threads\n");
- pthread_create(&t1,NULL, &a, NULL);
- pthread_create(&t2,NULL, &b, NULL);
- pthread_create(&t3,NULL, &c, NULL);
-
- pthread_join(t1,NULL);
- pthread_join(t2,NULL);
- pthread_join(t3,NULL);
- printf("Main thread is finished\n");
-
- return 0;
-}
#include "mymemory.h"
typedef unsigned int uint;
-
template<typename _Tp>
class mllnode {
public:
uint _size;
};
+class actionlist;
+
template<typename _Tp>
class sllnode {
public:
_Tp val;
template<typename T>
friend class SnapList;
+ friend class actionlist;
};
template<typename _Tp>
array[index] = item;
}
+ void remove(type item) {
+ for(uint i = 0;i < _size;i++) {
+ if (at(i) == item) {
+ removeAt(i);
+ return;
+ }
+ }
+ }
+
void removeAt(uint index) {
for (uint i = index;(i + 1) < _size;i++) {
set(i, at(i + 1));
bool is_model_thread() const { return model_thread; }
+ void * get_stack_addr() { return stack; }
+ ClockVector * get_acq_fence_cv() { return acq_fence_cv; }
+
friend void thread_startup();
#ifdef TLS
friend void setup_context();
/** @brief The parent Thread which created this Thread */
Thread * const parent;
+ /** @brief Acquire fence cv */
+ ClockVector *acq_fence_cv;
+
/** @brief The THREAD_CREATE ModelAction which created this Thread */
ModelAction *creation;
void *arg;
ucontext_t context;
void *stack;
+ uint32_t stack_size;
#ifdef TLS
void * helper_stack;
public:
#include "model.h"
#include "execution.h"
#include "schedule.h"
+#include "clockvector.h"
#ifdef TLS
#include <dlfcn.h>
*/
Thread::Thread(thread_id_t tid) :
parent(NULL),
+ acq_fence_cv(new ClockVector()),
creation(NULL),
pending(NULL),
start_routine(NULL),
*/
Thread::Thread(thread_id_t tid, thrd_t *t, void (*func)(void *), void *a, Thread *parent) :
parent(parent),
+ acq_fence_cv(new ClockVector()),
creation(NULL),
pending(NULL),
start_routine(func),
*/
Thread::Thread(thread_id_t tid, thrd_t *t, void *(*func)(void *), void *a, Thread *parent) :
parent(parent),
+ acq_fence_cv(new ClockVector()),
creation(NULL),
pending(NULL),
start_routine(NULL),
{
if (!is_complete())
complete();
+
+ delete acq_fence_cv;
}
/** @return The thread_id_t corresponding to this Thread object. */
bool Thread::is_waiting_on(const Thread *t) const
{
Thread *wait;
+
+ // One thread relocks a recursive mutex
+ if (waiting_on() == t && pending->is_lock()) {
+ int mutex_type = pending->get_mutex()->get_state()->type;
+ if (mutex_type == PTHREAD_MUTEX_RECURSIVE)
+ return false;
+ }
+
for (wait = waiting_on();wait != NULL;wait = wait->waiting_on())
if (wait == t)
return true;