datarace.o impatomic.o cmodelint.o \
snapshot.o malloc.o mymemory.o common.o mutex.o conditionvariable.o \
context.o execution.o libannotate.o plugins.o pthread.o futex.o fuzzer.o \
- sleeps.o history.o funcnode.o funcinst.o printf.o
+ sleeps.o history.o funcnode.o funcinst.o predicate.o printf.o newfuzzer.o \
+ concretepredicate.o waitobj.o hashfunction.o pipe.o
CPPFLAGS += -Iinclude -I.
LDFLAGS := -ldl -lrt -rdynamic -lpthread
seq_number(ACTION_INITIAL_CLOCK)
{
/* References to NULL atomic variables can end up here */
- ASSERT(loc || type == ATOMIC_FENCE || type == NOOP);
+ ASSERT(loc || type == ATOMIC_FENCE);
Thread *t = thread ? thread : thread_current();
this->tid = t!= NULL ? t->get_id() : -1;
}
+/**
+ * @brief Construct a new ModelAction for sleep actions
+ *
+ * @param type The type of action: THREAD_SLEEP
+ * @param order The memory order of this action. A "don't care" for non-ATOMIC
+ * actions (e.g., THREAD_* or MODEL_* actions).
+ * @param loc The location that this action acts upon
+ * @param value The time duration a thread is scheduled to sleep.
+ * @param _time The time at which this sleep action is constructed
+ */
+ModelAction::ModelAction(action_type_t type, memory_order order, uint64_t value, uint64_t _time) :
+ location(NULL),
+ position(NULL),
+ time(_time),
+ last_fence_release(NULL),
+ uninitaction(NULL),
+ cv(NULL),
+ rf_cv(NULL),
+ value(value),
+ type(type),
+ order(order),
+ original_order(order),
+ seq_number(ACTION_INITIAL_CLOCK)
+{
+ Thread *t = thread_current();
+ this->tid = t!= NULL ? t->get_id() : -1;
+}
+
/**
* @brief Construct a new ModelAction
*
Thread *t = thread ? thread : thread_current();
this->tid = t->get_id();
- // model_print("position: %s\n", position);
}
seq_number = num;
}
+void ModelAction::reset_seq_number()
+{
+ seq_number = 0;
+}
+
bool ModelAction::is_thread_start() const
{
return type == THREAD_START;
bool ModelAction::is_mutex_op() const
{
- return type == ATOMIC_LOCK || type == ATOMIC_TRYLOCK || type == ATOMIC_UNLOCK || type == ATOMIC_WAIT || type == ATOMIC_NOTIFY_ONE || type == ATOMIC_NOTIFY_ALL;
+ return type == ATOMIC_LOCK || type == ATOMIC_TRYLOCK || type == ATOMIC_UNLOCK || type == ATOMIC_WAIT || type == ATOMIC_TIMEDWAIT || type == ATOMIC_NOTIFY_ONE || type == ATOMIC_NOTIFY_ALL;
}
bool ModelAction::is_lock() const
return type == ATOMIC_LOCK;
}
+bool ModelAction::is_sleep() const
+{
+ return type == THREAD_SLEEP;
+}
+
bool ModelAction::is_wait() const {
- return type == ATOMIC_WAIT;
+ return type == ATOMIC_WAIT || type == ATOMIC_TIMEDWAIT;
}
bool ModelAction::is_notify() const {
ASSERT(act);
reads_from = act;
-
if (act->is_uninitialized()) { // WL
uint64_t val = *((uint64_t *) location);
ModelAction * act_uninitialized = (ModelAction *)act;
case THREAD_YIELD: return "thread yield";
case THREAD_JOIN: return "thread join";
case THREAD_FINISH: return "thread finish";
+ case THREAD_SLEEP: return "thread sleep";
case THREADONLY_FINISH: return "pthread_exit finish";
case PTHREAD_CREATE: return "pthread create";
case ATOMIC_UNLOCK: return "unlock";
case ATOMIC_TRYLOCK: return "trylock";
case ATOMIC_WAIT: return "wait";
+ case ATOMIC_TIMEDWAIT: return "timed wait";
case ATOMIC_NOTIFY_ONE: return "notify one";
case ATOMIC_NOTIFY_ALL: return "notify all";
case ATOMIC_ANNOTATION: return "annotation";
THREAD_JOIN, // < A thread join action
THREAD_FINISH, // < A thread completion action
THREADONLY_FINISH, // < A thread completion action
+ THREAD_SLEEP, // < A sleep operation
+
PTHREAD_CREATE, // < A pthread creation action
PTHREAD_JOIN, // < A pthread join action
+
ATOMIC_UNINIT, // < Represents an uninitialized atomic
NONATOMIC_WRITE, // < Represents a non-atomic store
ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
ATOMIC_LOCK, // < A lock action
ATOMIC_TRYLOCK, // < A trylock action
ATOMIC_UNLOCK, // < An unlock action
+
ATOMIC_NOTIFY_ONE, // < A notify_one action
ATOMIC_NOTIFY_ALL, // < A notify all action
ATOMIC_WAIT, // < A wait action
+ ATOMIC_TIMEDWAIT, // < A timed wait action
ATOMIC_ANNOTATION, // < An annotation action to pass information to a trace analysis
- NOOP // no operation, which returns control to scheduler
} action_type_t;
ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE, Thread *thread = NULL);
ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value, int size);
ModelAction(action_type_t type, const char * position, memory_order order, void *loc, uint64_t value, int size);
+ ModelAction(action_type_t type, memory_order order, uint64_t value, uint64_t time);
ModelAction(action_type_t type, const char * position, memory_order order, void *loc, uint64_t value = VALUE_NONE, Thread *thread = NULL);
~ModelAction();
void print() const;
uint64_t get_write_value() const;
uint64_t get_return_value() const;
ModelAction * get_reads_from() const { return reads_from; }
+ uint64_t get_time() const {return time;}
cdsc::mutex * get_mutex() const;
void set_read_from(ModelAction *act);
void copy_from_new(ModelAction *newaction);
void set_seq_number(modelclock_t num);
+ void reset_seq_number();
void set_try_lock(bool obtainedlock);
bool is_thread_start() const;
bool is_thread_join() const;
bool is_mutex_op() const;
bool is_lock() const;
+ bool is_sleep() const;
bool is_trylock() const;
bool is_unlock() const;
bool is_wait() const;
*/
ModelAction *reads_from;
int size;
+ uint64_t time; //used for sleep
};
/** @brief The last fence release from the same thread */
#define CLASSLIST_H
#include <inttypes.h>
#include "stl-model.h"
+#include "hashset.h"
+#include "modeltypes.h"
class ClockVector;
class CycleGraph;
class Thread;
class TraceAnalysis;
class Fuzzer;
+class NewFuzzer;
class FuncNode;
class FuncInst;
+class Predicate;
+class ConcretePredicate;
+class WaitObj;
struct model_snapshot_members;
struct bug_message;
+
typedef SnapList<ModelAction *> action_list_t;
typedef SnapList<uint32_t> func_id_list_t;
typedef SnapList<FuncInst *> func_inst_list_t;
+typedef HashTable<FuncInst *, ModelAction *, uintptr_t, 0> inst_act_map_t;
+
+typedef HashSet<Predicate *, uintptr_t, 0, model_malloc, model_calloc, model_free> PredSet;
+typedef HSIterator<Predicate *, uintptr_t, 0, model_malloc, model_calloc, model_free> PredSetIter;
+
+typedef HashSet<uint64_t, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free> value_set_t;
+typedef HSIterator<uint64_t, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free> value_set_iter;
+typedef HashSet<void *, uintptr_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free> loc_set_t;
+typedef HSIterator<void *, uintptr_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free> loc_set_iter;
+typedef HashSet<thread_id_t, int, 0> thrd_id_set_t;
+typedef HSIterator<thread_id_t, int, 0> thrd_id_set_iter;
extern volatile int modellock;
#endif
#include "threads-model.h"
#include "datarace.h"
-memory_order orders[8] = {
+memory_order orders[7] = {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
memory_order_release, memory_order_acq_rel, memory_order_seq_cst,
};
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
- recordWrite(tid, (void *)(((char *)obj)+i)); \
+ atomraceCheckWrite(tid, (void *)(((char *)obj)+i)); \
} \
}
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
- recordWrite(tid, (void *)(((char *)obj)+i)); \
+ atomraceCheckWrite(tid, (void *)(((char *)obj)+i)); \
} \
}
#define CDSATOMICLOAD(size) \
uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
ensureModel(); \
- return (uint ## size ## _t)model->switch_to_master( \
+ uint ## size ## _t val = (uint ## size ## _t)model->switch_to_master( \
new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
+ thread_id_t tid = thread_current()->get_id(); \
+ for(int i=0;i < size / 8;i++) { \
+ atomraceCheckRead(tid, (void *)(((char *)obj)+i)); \
+ } \
+ return val; \
}
CDSATOMICLOAD(8)
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
- recordWrite(tid, (void *)(((char *)obj)+i)); \
+ atomraceCheckWrite(tid, (void *)(((char *)obj)+i)); \
} \
}
*((volatile uint ## size ## _t *)addr) = _copy; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
+ atomraceCheckRead(tid, (void *)(((char *)addr)+i)); \
recordWrite(tid, (void *)(((char *)addr)+i)); \
} \
- return _old; \
+ return _old; \
})
// cds atomic exchange
*/
void cds_func_entry(const char * funcName) {
- if (!model) return;
-
- Thread * th = thread_current();
- uint32_t func_id;
-
- ModelHistory *history = model->get_history();
- if ( !history->getFuncMap()->contains(funcName) ) {
- // add func id to func map
- func_id = history->get_func_counter();
- history->incr_func_counter();
- history->getFuncMap()->put(funcName, func_id);
-
- // add func id to reverse func map
- ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
- if ( func_map_rev->size() <= func_id )
- func_map_rev->resize( func_id + 1 );
- func_map_rev->at(func_id) = funcName;
- } else {
- func_id = history->getFuncMap()->get(funcName);
- }
-
- history->enter_function(func_id, th->get_id());
+ ensureModel();
+ /*
+ Thread * th = thread_current();
+ uint32_t func_id;
+
+ ModelHistory *history = model->get_history();
+ if ( !history->getFuncMap()->contains(funcName) ) {
+ // add func id to func map
+ func_id = history->get_func_counter();
+ history->incr_func_counter();
+ history->getFuncMap()->put(funcName, func_id);
+
+ // add func id to reverse func map
+ ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
+ if ( func_map_rev->size() <= func_id )
+ func_map_rev->resize( func_id + 1 );
+ func_map_rev->at(func_id) = funcName;
+ } else {
+ func_id = history->getFuncMap()->get(funcName);
+ }
+
+ history->enter_function(func_id, th->get_id());
+ */
}
void cds_func_exit(const char * funcName) {
- if (!model) return;
+ ensureModel();
- Thread * th = thread_current();
- uint32_t func_id;
+/* Thread * th = thread_current();
+ uint32_t func_id;
- ModelHistory *history = model->get_history();
- func_id = history->getFuncMap()->get(funcName);
+ ModelHistory *history = model->get_history();
+ func_id = history->getFuncMap()->get(funcName);
- /* func_id not found; this could happen in the case where a function calls cds_func_entry
- * when the model has been defined yet, but then an atomic inside the function initializes
- * the model. And then cds_func_exit is called upon the function exiting.
- */
- if (func_id == 0)
- return;
+ * func_id not found; this could happen in the case where a function calls cds_func_entry
+ * when the model has not yet been defined, but then an atomic inside the function initializes
+ * the model. And then cds_func_exit is called upon the function exiting.
+ *
+ if (func_id == 0)
+ return;
- history->exit_function(func_id, th->get_id());
+ history->exit_function(func_id, th->get_id());
+ */
}
*
* This function should only be called once.
*/
+char filename[256];
+
void redirect_output()
{
/* Save stdout for later use */
perror("dup");
exit(EXIT_FAILURE);
}
-
- /* Redirect program output to a pipe */
- int pipefd[2];
- if (pipe(pipefd) < 0) {
- perror("pipe");
- exit(EXIT_FAILURE);
- }
- if (dup2(pipefd[1], STDOUT_FILENO) < 0) {
+ snprintf_(filename, sizeof(filename), "C11FuzzerTmp%d", getpid());
+ fd_user_out = open(filename, O_CREAT | O_TRUNC| O_RDWR, S_IRWXU);
+ if (dup2(fd_user_out, STDOUT_FILENO) < 0) {
perror("dup2");
exit(EXIT_FAILURE);
}
- close(pipefd[1]);
- /* Save the "read" side of the pipe for use later */
- if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) < 0) {
- perror("fcntl");
- exit(EXIT_FAILURE);
- }
- fd_user_out = pipefd[0];
}
/**
void clear_program_output()
{
fflush(stdout);
- char buf[200];
- while (read_to_buf(fd_user_out, buf, sizeof(buf))) ;
+ close(fd_user_out);
+ unlink(filename);
}
/** @brief Print out any pending program output */
/* Gather all program output */
fflush(stdout);
+ lseek(fd_user_out, 0, SEEK_SET);
/* Read program output pipe and write to (real) stdout */
ssize_t ret;
while (1) {
}
}
+ close(fd_user_out);
+ unlink(filename);
+
model_print("---- END PROGRAM OUTPUT ----\n");
}
#endif /* ! CONFIG_DEBUG */
LIB_NAME := model
LIB_SO := lib$(LIB_NAME).so
-CPPFLAGS += -Wall -g -O0
+CPPFLAGS += -Wall -g -O3
# Mac OSX options
ifeq ($(UNAME), Darwin)
--- /dev/null
+#include "concretepredicate.h"
+
+ConcretePredicate::ConcretePredicate(thread_id_t tid) :
+ tid(tid),
+ expressions()
+{}
+
+void ConcretePredicate::add_expression(token_t token, uint64_t value, bool equality)
+{
+ expressions.push_back(concrete_pred_expr(token, value, equality));
+}
--- /dev/null
+#ifndef __CONCRETE_PREDICATE_H__
+#define __CONCRETE_PREDICATE_H__
+
+#include <inttypes.h>
+#include "modeltypes.h"
+#include "classlist.h"
+#include "predicatetypes.h"
+
+class ConcretePredicate {
+public:
+ ConcretePredicate(thread_id_t tid);
+ ~ConcretePredicate() {}
+
+ void add_expression(token_t token, uint64_t value, bool equality);
+ SnapVector<struct concrete_pred_expr> * getExpressions() { return &expressions; }
+ void set_location(void * loc) { location = loc; }
+ void * get_location() { return location; }
+ thread_id_t get_tid() { return tid; }
+
+ SNAPSHOTALLOC
+private:
+ thread_id_t tid;
+ void * location;
+ SnapVector<struct concrete_pred_expr> expressions;
+};
+
+#endif /* __CONCRETE_PREDICATE_H__ */
/* Propagate clock vector changes */
if (tonode->cv->merge(fromnode->cv)) {
- queue->push_back(fromnode);
+ queue->push_back(tonode);
while(!queue->empty()) {
const CycleNode *node = queue->back();
queue->pop_back();
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
+ if (currClock == NULL)
+ return;
+
struct DataRace * race = NULL;
/* Do full record */
if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
}
}
+
+/** This function does race detection for a write on an expanded record. */
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+{
+ struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+ struct DataRace * race = NULL;
+
+ if (record->isAtomic)
+ goto Exit;
+
+ /* Check for datarace against last read. */
+
+ for (int i = 0;i < record->numReads;i++) {
+ modelclock_t readClock = record->readClock[i];
+ thread_id_t readThread = record->thread[i];
+
+ /* Note that readClock can't actually be zero here, so it could be
+ optimized. */
+
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
+ /* We have a datarace */
+ race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+ goto Exit;
+ }
+ }
+
+ /* Check for datarace against last write. */
+
+ {
+ modelclock_t writeClock = record->writeClock;
+ thread_id_t writeThread = record->writeThread;
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+ goto Exit;
+ }
+ }
+Exit:
+ record->numReads = 0;
+ record->writeThread = thread;
+ record->isAtomic = 1;
+ modelclock_t ourClock = currClock->getClock(thread);
+ record->writeClock = ourClock;
+ return race;
+}
+
+/** This function does race detection on a write. */
+void atomraceCheckWrite(thread_id_t thread, void *location)
+{
+ uint64_t *shadow = lookupAddressEntry(location);
+ uint64_t shadowval = *shadow;
+ ClockVector *currClock = get_execution()->get_cv(thread);
+ if (currClock == NULL)
+ return;
+
+ struct DataRace * race = NULL;
+ /* Do full record */
+ if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+ race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+ goto Exit;
+ }
+
+ {
+ int threadid = id_to_int(thread);
+ modelclock_t ourClock = currClock->getClock(thread);
+
+ /* Thread ID is too large or clock is too large. */
+ if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+ expandRecord(shadow);
+ race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+ goto Exit;
+ }
+
+ /* Can't race with atomic */
+ if (shadowval & ATOMICMASK)
+ goto ShadowExit;
+
+ {
+ /* Check for datarace against last read. */
+
+ modelclock_t readClock = READVECTOR(shadowval);
+ thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
+ /* We have a datarace */
+ race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+ goto ShadowExit;
+ }
+ }
+
+ {
+ /* Check for datarace against last write. */
+
+ modelclock_t writeClock = WRITEVECTOR(shadowval);
+ thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+ goto ShadowExit;
+ }
+ }
+
+ShadowExit:
+ *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
+ }
+
+Exit:
+ if (race) {
+ race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+ if (raceset->add(race))
+ assert_race(race);
+ else model_free(race);
+ }
+}
+
/** This function does race detection for a write on an expanded record. */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
record->isAtomic = 1;
}
+/** This function just updates write metadata on an expanded record for a non-atomic write. */
+void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+ struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+ record->numReads = 0;
+ record->writeThread = thread;
+ modelclock_t ourClock = currClock->getClock(thread);
+ record->writeClock = ourClock;
+ record->isAtomic = 0;
+}
+
/** This function just updates metadata on atomic write. */
void recordWrite(thread_id_t thread, void *location) {
uint64_t *shadow = lookupAddressEntry(location);
*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
}
+/** This function updates the shadow memory for each byte of a calloc'd region, recording a non-atomic write. */
+void recordCalloc(void *location, size_t size) {
+ thread_id_t thread = thread_current()->get_id();
+ for(;size != 0;size--) {
+ uint64_t *shadow = lookupAddressEntry(location);
+ uint64_t shadowval = *shadow;
+ ClockVector *currClock = get_execution()->get_cv(thread);
+ /* Do full record */
+ if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+ fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+ return;
+ }
+
+ int threadid = id_to_int(thread);
+ modelclock_t ourClock = currClock->getClock(thread);
+
+ /* Thread ID is too large or clock is too large. */
+ if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+ expandRecord(shadow);
+ fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+ return;
+ }
+
+ *shadow = ENCODEOP(0, 0, threadid, ourClock);
+ location = (void *)(((char *) location) + 1);
+ }
+}
+
/** This function does race detection on a read for an expanded record. */
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
ClockVector *currClock = get_execution()->get_cv(thread);
+ if (currClock == NULL)
+ return;
+
struct DataRace * race = NULL;
/* Do full record */
else model_free(race);
}
}
+
+
+/** This function does race detection on a read for an expanded record. */
+struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+ struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+ struct DataRace * race = NULL;
+ /* Check for datarace against last write. */
+ if (record->isAtomic)
+ return NULL;
+
+ modelclock_t writeClock = record->writeClock;
+ thread_id_t writeThread = record->writeThread;
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+ }
+ return race;
+}
+
+/** This function does race detection on a read. */
+void atomraceCheckRead(thread_id_t thread, const void *location)
+{
+ uint64_t *shadow = lookupAddressEntry(location);
+ uint64_t shadowval = *shadow;
+ ClockVector *currClock = get_execution()->get_cv(thread);
+ if (currClock == NULL)
+ return;
+
+ struct DataRace * race = NULL;
+
+ /* Do full record */
+ if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+ race = atomfullRaceCheckRead(thread, location, shadow, currClock);
+ goto Exit;
+ }
+
+ if (shadowval & ATOMICMASK)
+ return;
+
+ {
+ /* Check for datarace against last write. */
+
+ modelclock_t writeClock = WRITEVECTOR(shadowval);
+ thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+ goto Exit;
+ }
+
+
+ }
+Exit:
+ if (race) {
+ race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+ if (raceset->add(race))
+ assert_race(race);
+ else model_free(race);
+ }
+}
void initRaceDetector();
void raceCheckWrite(thread_id_t thread, void *location);
+void atomraceCheckWrite(thread_id_t thread, void *location);
void raceCheckRead(thread_id_t thread, const void *location);
+void atomraceCheckRead(thread_id_t thread, const void *location);
void recordWrite(thread_id_t thread, void *location);
+void recordCalloc(void *location, size_t size);
void assert_race(struct DataRace *race);
bool hasNonAtomicStore(const void *location);
void setAtomicStoreFlag(const void *location);
#include "bugmessage.h"
#include "history.h"
#include "fuzzer.h"
+#include "newfuzzer.h"
#define INITIAL_THREAD_ID 0
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
mo_graph(new CycleGraph()),
- fuzzer(new Fuzzer()),
+ fuzzer(new NewFuzzer()),
thrd_func_list(),
- thrd_func_inst_lists(),
+ thrd_func_act_lists(),
isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
add_thread(model_thread);
+ fuzzer->register_engine(m->get_history(), this);
scheduler->register_engine(this);
}
return model->get_execution_number();
}
-static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
+static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
{
action_list_t *tmp = hash->get(ptr);
if (tmp == NULL) {
return tmp;
}
-static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
+static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
{
SnapVector<action_list_t> *tmp = hash->get(ptr);
if (tmp == NULL) {
return ++priv->used_sequence_numbers;
}
+/** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
+void ModelExecution::restore_last_seq_num()
+{
+ priv->used_sequence_numbers--;
+}
+
/**
* @brief Should the current action wake up a given thread?
*
if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
return true;
}
+ /* The thread is asleep due to a THREAD_SLEEP action; ask the fuzzer whether to wake it */
+ if (asleep->is_sleep()) {
+ if (fuzzer->shouldWake(asleep))
+ return true;
+ }
+
return false;
}
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
if (scheduler->is_sleep_set(thr)) {
- if (should_wake_up(curr, thr))
+ if (should_wake_up(curr, thr)) {
/* Remove this thread from sleep set */
scheduler->remove_sleep(thr);
+ if (thr->get_pending()->is_sleep())
+ thr->set_wakeup_state(true);
+ }
}
}
}
return priv->bugs.size() != 0;
}
-/** @return True, if any fatal bugs have been reported for this execution.
- * Any bug other than a data race is considered a fatal bug. Data races
- * are not considered fatal unless the number of races is exceeds
- * a threshold (temporarily set as 15).
- */
-bool ModelExecution::have_fatal_bug_reports() const
-{
- return priv->bugs.size() != 0;
-}
-
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
return &priv->bugs;
add_normal_write_to_lists(act);
add_write_to_lists(act);
w_modification_order(act);
+// model->get_history()->process_action(act, act->get_tid());
return act;
}
* @param rf_set is the set of model actions we can possibly read from
* @return True if processing this read updates the mo_graph.
*/
-void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
+bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
rf_set->push_back(nonatomicstore);
}
+ // Remove writes that violate read modification order
+ /*
+ for (uint i = 0; i < rf_set->size(); i++) {
+ ModelAction * rf = (*rf_set)[i];
+ if (!r_modification_order(curr, rf, NULL, NULL, true)) {
+ (*rf_set)[i] = rf_set->back();
+ rf_set->pop_back();
+ }
+ }*/
+
while(true) {
int index = fuzzer->selectWrite(curr, rf_set);
- ModelAction *rf = (*rf_set)[index];
+ if (index == -1) // no feasible write exists
+ return false;
+ ModelAction *rf = (*rf_set)[index];
ASSERT(rf);
bool canprune = false;
int tid = id_to_int(curr->get_tid());
(*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
}
- return;
+ return true;
}
priorset->clear();
(*rf_set)[index] = rf_set->back();
}
break;
}
- case ATOMIC_WAIT:
+ case ATOMIC_WAIT: {
+ /* wake up the other threads */
+ for (unsigned int i = 0;i < get_num_threads();i++) {
+ Thread *t = get_thread(int_to_id(i));
+ Thread *curr_thrd = get_thread(curr);
+ if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
+ scheduler->wake(t);
+ }
+
+ /* unlock the lock - after checking who was waiting on it */
+ state->locked = NULL;
+
+ if (fuzzer->shouldWait(curr)) {
+ /* disable this thread */
+ get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
+ scheduler->sleep(get_thread(curr));
+ }
+
+ break;
+ }
+ case ATOMIC_TIMEDWAIT:
case ATOMIC_UNLOCK: {
//TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
/* unlock the lock - after checking who was waiting on it */
state->locked = NULL;
-
- if (!curr->is_wait())
- break;/* The rest is only for ATOMIC_WAIT */
-
break;
}
case ATOMIC_NOTIFY_ALL: {
case THREAD_START: {
break;
}
+ case THREAD_SLEEP: {
+ Thread *th = get_thread(curr);
+ th->set_pending(curr);
+ scheduler->add_sleep(th);
+ break;
+ }
default:
break;
}
if (!blocking->is_complete()) {
return false;
}
+ } else if (curr->is_sleep()) {
+ if (!fuzzer->shouldSleep(curr))
+ return false;
}
return true;
wake_up_sleeping_actions(curr);
- /* Add the action to lists before any other model-checking tasks */
- if (!second_part_of_rmw && curr->get_type() != NOOP)
- add_action_to_lists(curr);
-
- if (curr->is_write())
- add_write_to_lists(curr);
+ /* Add uninitialized actions to lists */
+ if (!second_part_of_rmw)
+ add_uninit_action_to_lists(curr);
SnapVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
rf_set = build_may_read_from(curr);
- process_thread_action(curr);
-
if (curr->is_read() && !second_part_of_rmw) {
process_read(curr, rf_set);
delete rf_set;
- } else {
+
+/* bool success = process_read(curr, rf_set);
+ delete rf_set;
+ if (!success)
+ return curr; // Do not add action to lists
+*/
+ } else
ASSERT(rf_set == NULL);
- }
+
+ /* Add the action to lists */
+ if (!second_part_of_rmw)
+ add_action_to_lists(curr);
+
+ if (curr->is_write())
+ add_write_to_lists(curr);
+
+ process_thread_action(curr);
if (curr->is_write())
process_write(curr);
*
* @param curr The current action. Must be a read.
* @param rf The ModelAction or Promise that curr reads from. Must be a write.
+ * @param check_only If true, then only check whether the current action satisfies
+ * read modification order or not, without modifiying priorset and canprune.
+ * False by default.
* @return True if modification order edges were added; false otherwise
*/
-bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
+bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
+ SnapVector<const ModelAction *> * priorset, bool * canprune, bool check_only)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
*act < *last_sc_fence_thread_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
}
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
} else {
const ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
return false;
- priorset->push_back(prevrf);
+ if (!check_only)
+ priorset->push_back(prevrf);
} else {
if (act->get_tid() == curr->get_tid()) {
//Can prune curr from obj list
- *canprune = true;
+ if (!check_only)
+ *canprune = true;
}
}
}
}
/**
- * Performs various bookkeeping operations for the current ModelAction. For
- * instance, adds action to the per-object, per-thread action vector and to the
- * action trace list of all thread actions.
+ * Performs various bookkeeping operations for the current ModelAction when it is
+ * the first atomic action occurred at its memory location.
*
- * @param act is the ModelAction to add.
+ * For instance, adds uninitialized action to the per-object, per-thread action vector
+ * and to the action trace list of all thread actions.
+ *
+ * @param act is the ModelAction to process.
*/
-void ModelExecution::add_action_to_lists(ModelAction *act)
+void ModelExecution::add_uninit_action_to_lists(ModelAction *act)
{
int tid = id_to_int(act->get_tid());
ModelAction *uninit = NULL;
uninit_id = id_to_int(uninit->get_tid());
list->push_front(uninit);
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
- if (uninit_id >= (int)vec->size()) {
+ if ((int)vec->size() <= uninit_id) {
int oldsize = (int) vec->size();
vec->resize(uninit_id + 1);
- for(int i=oldsize;i<uninit_id+1;i++) {
- new(&(*vec)[i]) action_list_t();
+ for(int i = oldsize; i < uninit_id + 1; i++) {
+ new (&(*vec)[i]) action_list_t();
}
}
(*vec)[uninit_id].push_front(uninit);
}
- list->push_back(act);
// Update action trace, a total order of all actions
- action_trace.push_back(act);
if (uninit)
action_trace.push_front(uninit);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
- if (tid >= (int)vec->size()) {
- uint oldsize =vec->size();
+ if ((int)vec->size() <= tid) {
+ uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
- for(uint i=oldsize;i<priv->next_thread_id;i++)
+ for(uint i = oldsize; i < priv->next_thread_id; i++)
new (&(*vec)[i]) action_list_t();
}
- (*vec)[tid].push_back(act);
if (uninit)
(*vec)[uninit_id].push_front(uninit);
// Update thrd_last_action, the last action taken by each thrad
if ((int)thrd_last_action.size() <= tid)
thrd_last_action.resize(get_num_threads());
- thrd_last_action[tid] = act;
if (uninit)
thrd_last_action[uninit_id] = uninit;
+}
+
+
+/**
+ * Performs various bookkeeping operations for the current ModelAction. For
+ * instance, adds action to the per-object, per-thread action vector and to the
+ * action trace list of all thread actions.
+ *
+ * @param act is the ModelAction to add.
+ */
+void ModelExecution::add_action_to_lists(ModelAction *act)
+{
+ int tid = id_to_int(act->get_tid());
+ action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ list->push_back(act);
+
+ // Update action trace, a total order of all actions
+ action_trace.push_back(act);
+
+ // Update obj_thrd_map, a per location, per thread, order of actions
+ SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+ if ((int)vec->size() <= tid) {
+ uint oldsize = vec->size();
+ vec->resize(priv->next_thread_id);
+ for(uint i = oldsize; i < priv->next_thread_id; i++)
+ new (&(*vec)[i]) action_list_t();
+ }
+ (*vec)[tid].push_back(act);
+
+	// Update thrd_last_action, the last action taken by each thread
+ if ((int)thrd_last_action.size() <= tid)
+ thrd_last_action.resize(get_num_threads());
+ thrd_last_action[tid] = act;
// Update thrd_last_fence_release, the last release fence taken by each thread
if (act->is_fence() && act->is_release()) {
get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
- if (tid >= (int)vec->size()) {
+ if ((int)vec->size() <= tid) {
uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
- for(uint i=oldsize;i<priv->next_thread_id;i++)
+ for(uint i = oldsize; i < priv->next_thread_id; i++)
new (&(*vec)[i]) action_list_t();
}
(*vec)[tid].push_back(act);
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
{
/* Do not split atomic RMW */
- if (curr->is_rmwr())
+ if (curr->is_rmwr() && !paused_by_fuzzer(curr))
return get_thread(curr);
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
return NULL;
}
+/**
+ * @brief Check whether a read action was paused (put to sleep) by the fuzzer.
+ *
+ * A paused read is marked by resetting its sequence number to 0 (see
+ * ModelAction::reset_seq_number), so a zero sequence number is used here
+ * as the "paused" flag.
+ *
+ * @param act A read atomic action
+ * @return true if act's sequence number is 0, i.e. the fuzzer paused it
+ */
+bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
+{
+	// Precondition: only read actions can be paused this way.
+	ASSERT(act->is_read());
+
+	// Actions paused by fuzzer have their sequence number reset to 0
+	return act->get_seq_number() == 0;
+}
+
/**
* Takes the next step in the execution, if possible.
* @param curr The current step to take
curr = check_current_action(curr);
ASSERT(curr);
- /* Process this action in ModelHistory for records*/
- // model->get_history()->process_action( curr, curr->get_tid() );
+ /* Process this action in ModelHistory for records */
+// model->get_history()->process_action( curr, curr->get_tid() );
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
bool assert_bug(const char *msg);
bool have_bug_reports() const;
- bool have_fatal_bug_reports() const;
SnapVector<bug_message *> * get_bugs() const;
ModelAction * check_current_action(ModelAction *curr);
SnapVector<func_id_list_t> * get_thrd_func_list() { return &thrd_func_list; }
- SnapVector< SnapList<func_inst_list_t *> *> * get_thrd_func_inst_lists() { return &thrd_func_inst_lists; }
+ SnapVector<uint32_t> * get_thrd_last_entered_func() { return &thrd_last_entered_func; }
+ SnapVector< SnapList<action_list_t *> *> * get_thrd_func_act_lists() { return &thrd_func_act_lists; }
bool isFinished() {return isfinished;}
void setFinished() {isfinished = true;}
+ void restore_last_seq_num();
+
SNAPSHOTALLOC
private:
int get_execution_number() const;
bool next_execution();
bool initialize_curr_action(ModelAction **curr);
- void process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set);
+ bool process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set);
void process_write(ModelAction *curr);
bool process_fence(ModelAction *curr);
bool process_mutex(ModelAction *curr);
void read_from(ModelAction *act, ModelAction *rf);
bool synchronize(const ModelAction *first, ModelAction *second);
+ void add_uninit_action_to_lists(ModelAction *act);
void add_action_to_lists(ModelAction *act);
void add_normal_write_to_lists(ModelAction *act);
void add_write_to_lists(ModelAction *act);
SnapVector<ModelAction *> * build_may_read_from(ModelAction *curr);
ModelAction * process_rmw(ModelAction *curr);
- bool r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> *priorset, bool *canprune);
+ bool r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> *priorset, bool *canprune, bool check_only = false);
void w_modification_order(ModelAction *curr);
ClockVector * get_hb_from_write(ModelAction *rf) const;
ModelAction * get_uninitialized_action(ModelAction *curr) const;
/** Per-object list of actions. Maps an object (i.e., memory location)
* to a trace of all actions performed on the object. */
- HashTable<const void *, action_list_t *, uintptr_t, 4> obj_map;
+ HashTable<const void *, action_list_t *, uintptr_t, 2> obj_map;
/** Per-object list of actions. Maps an object (i.e., memory location)
* to a trace of all actions performed on the object. */
- HashTable<const void *, action_list_t *, uintptr_t, 4> condvar_waiters_map;
+ HashTable<const void *, action_list_t *, uintptr_t, 2> condvar_waiters_map;
- HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> obj_thrd_map;
+ HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> obj_thrd_map;
- HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> obj_wr_thrd_map;
+ HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> obj_wr_thrd_map;
HashTable<const void *, ModelAction *, uintptr_t, 4> obj_last_sc_map;
Fuzzer * fuzzer;
Thread * action_select_next_thread(const ModelAction *curr) const;
+ bool paused_by_fuzzer(const ModelAction * act) const;
/* thrd_func_list stores a list of function ids for each thread.
* Each element in thrd_func_list stores the functions that
* This data structure is handled by ModelHistory
*/
SnapVector<func_id_list_t> thrd_func_list;
+ SnapVector<uint32_t> thrd_last_entered_func;
/* Keeps track of atomic actions that thread i has performed in some
* function. Index of SnapVector is thread id. SnapList simulates
* the call stack.
- *
- * This data structure is handled by ModelHistory
*/
- SnapVector< SnapList< func_inst_list_t *> *> thrd_func_inst_lists;
+ SnapVector< SnapList<action_list_t *> *> thrd_func_act_lists;
bool isfinished;
};
#include "funcinst.h"
+#include "model.h"
FuncInst::FuncInst(ModelAction *act, FuncNode *func_node) :
- collisions()
+ single_location(true),
+ execution_number(0),
+ action_marker(0) /* The marker for FuncNode starts from 1 */
{
ASSERT(act);
ASSERT(func_node);
this->position = act->get_position();
this->location = act->get_location();
this->type = act->get_type();
+ this->order = act->get_mo();
this->func_node = func_node;
}
return true;
}
+/**
+ * @brief Associate a ModelAction with this FuncInst for one traversal.
+ *
+ * The marker tags the traversal in which the association is valid;
+ * get_associated_act() only returns the stored action when queried
+ * with the same marker value.
+ *
+ * @param act The ModelAction to associate with this instruction
+ * @param marker Marker of the traversal storing this association
+ */
+void FuncInst::set_associated_act(ModelAction * act, uint32_t marker)
+{
+	associated_act = act;
+	action_marker = marker;
+}
+
+/**
+ * @brief Retrieve the ModelAction stored by set_associated_act().
+ *
+ * @param marker Marker of the current traversal
+ * @return The associated action if it was stored under the same marker;
+ *         NULL when the stored association is stale (different marker)
+ */
+ModelAction * FuncInst::get_associated_act(uint32_t marker)
+{
+	if (action_marker == marker)
+		return associated_act;
+	else
+		return NULL;
+}
+
+/* Search the FuncInst that has the same type as act in the collision list */
FuncInst * FuncInst::search_in_collision(ModelAction *act)
{
action_type type = act->get_type();
- mllnode<FuncInst*>* it;
- for (it = collisions.begin();it != NULL;it=it->getNext()) {
+ mllnode<FuncInst*> * it;
+ for (it = collisions.begin(); it != NULL; it = it->getNext()) {
FuncInst * inst = it->getVal();
- if ( inst->get_type() == type )
+ if (inst->get_type() == type)
return inst;
}
return NULL;
}
+/**
+ * @brief Append a FuncInst to this instruction's collision list.
+ *
+ * Collisions are FuncInsts that share this instruction's source position
+ * but carry a different action type.
+ *
+ * @param inst The colliding FuncInst to record
+ */
+void FuncInst::add_to_collision(FuncInst * inst)
+{
+	collisions.push_back(inst);
+}
+
+/* Note: is_read() is equivalent to ModelAction::is_read() */
bool FuncInst::is_read() const
{
- return type == ATOMIC_READ || type == ATOMIC_RMWR || type == ATOMIC_RMWRCAS; /* type == ATOMIC_RMW ? */
+ return type == ATOMIC_READ || type == ATOMIC_RMWR || type == ATOMIC_RMWRCAS || type == ATOMIC_RMW;
}
+/* Note: because of action type conversion in ModelExecution
+ * is_write() <==> pure writes (excluding rmw) */
bool FuncInst::is_write() const
{
return type == ATOMIC_WRITE || type == ATOMIC_RMW || type == ATOMIC_INIT || type == ATOMIC_UNINIT || type == NONATOMIC_WRITE;
}
+/* Dump this FuncInst's position, location, and type for debugging. */
+void FuncInst::print()
+{
+	model_print("func inst - pos: %s, loc: %p, type: %d,\n", position, location, type);
+}
FuncInst(ModelAction *act, FuncNode *func_node);
~FuncInst();
- //ModelAction * get_action() const { return action; }
const char * get_position() const { return position; }
+
void * get_location() const { return location; }
+ void set_location(void * loc) { location = loc; }
+
action_type get_type() const { return type; }
+ memory_order get_mo() const { return order; }
FuncNode * get_func_node() const { return func_node; }
bool add_pred(FuncInst * other);
bool add_succ(FuncInst * other);
FuncInst * search_in_collision(ModelAction *act);
+ void add_to_collision(FuncInst * inst);
- func_inst_list_mt * get_collisions() { return &collisions; }
func_inst_list_mt * get_preds() { return &predecessors; }
func_inst_list_mt * get_succs() { return &successors; }
bool is_read() const;
bool is_write() const;
+ bool is_single_location() { return single_location; }
+ void not_single_location() { single_location = false; }
+
+ void set_execution_number(int new_number) { execution_number = new_number; }
+ int get_execution_number() { return execution_number; }
+
+ void set_associated_act(ModelAction * act, uint32_t marker);
+ ModelAction * get_associated_act(uint32_t marker);
+
+ void print();
MEMALLOC
private:
- //ModelAction * const action;
const char * position;
+
+ /* Atomic operations with the same source line number may act at different
+ * memory locations, such as the next field of the head pointer in ms-queue.
+ * location only stores the memory location when this FuncInst was constructed.
+ */
void * location;
+
+ /* NOTE: for rmw actions, func_inst and act may have different
+ * action types because of action type conversion in ModelExecution */
action_type type;
+
+ memory_order order;
FuncNode * func_node;
- /* collisions store a list of FuncInsts with the same position
- * but different action types. For example, CAS is broken down
- * as three different atomic operations in cmodelint.cc */
+ bool single_location;
+ int execution_number;
+
+ ModelAction * associated_act;
+ uint32_t action_marker;
+
+ /**
+ * Collisions store a list of FuncInsts with the same position
+ * but different action types. For example,
+ * <code>volatile int x; x++;</code> produces read and write
+ * actions with the same position.
+ */
func_inst_list_mt collisions;
func_inst_list_mt predecessors;
};
#endif /* __FUNCINST_H__ */
-
+#include "action.h"
+#include "history.h"
#include "funcnode.h"
+#include "funcinst.h"
#include "predicate.h"
+#include "concretepredicate.h"
-FuncNode::FuncNode() :
+#include "model.h"
+
+FuncNode::FuncNode(ModelHistory * history) :
+ history(history),
+ exit_count(0),
+ marker(1),
func_inst_map(),
inst_list(),
entry_insts(),
- thrd_read_map(),
- read_locations()
-{}
+ predicate_tree_position(),
+ edge_table(32),
+ out_edges()
+{
+ predicate_tree_entry = new Predicate(NULL, true);
+ predicate_tree_entry->add_predicate_expr(NOPREDICATE, NULL, true);
+ predicate_tree_exit = new Predicate(NULL, false, true);
+
+ /* Snapshot data structures below */
+ action_list_buffer = new SnapList<action_list_t *>();
+ read_locations = new loc_set_t();
+ write_locations = new loc_set_t();
+ val_loc_map = new HashTable<uint64_t, loc_set_t *, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free, int64_hash>();
+ loc_may_equal_map = new HashTable<void *, loc_set_t *, uintptr_t, 0>();
+
+ //values_may_read_from = new value_set_t();
+}
+
+/* Reallocate snapshotted memories when new executions start */
+void FuncNode::set_new_exec_flag()
+{
+	// These containers live in snapshotted memory, so they are rebuilt
+	// at the start of every execution; the constructor performs the
+	// same allocations (see the "Snapshot data structures" section).
+	action_list_buffer = new SnapList<action_list_t *>();
+	read_locations = new loc_set_t();
+	write_locations = new loc_set_t();
+	val_loc_map = new HashTable<uint64_t, loc_set_t *, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free, int64_hash>();
+	loc_may_equal_map = new HashTable<void *, loc_set_t *, uintptr_t, 0>();
+
+	//values_may_read_from = new value_set_t();
+}
/* Check whether FuncInst with the same type, position, and location
- * as act has been added to func_inst_map or not. If so, return it;
- * if not, add it and return it.
- *
- * @return FuncInst with the same type, position, and location as act */
-FuncInst * FuncNode::get_or_add_action(ModelAction *act)
+ * as act has been added to func_inst_map or not. If not, add it.
+ */
+void FuncNode::add_inst(ModelAction *act)
{
ASSERT(act);
const char * position = act->get_position();
- /* Actions THREAD_CREATE, THREAD_START, THREAD_YIELD, THREAD_JOIN,
- * THREAD_FINISH, PTHREAD_CREATE, PTHREAD_JOIN,
- * ATOMIC_LOCK, ATOMIC_TRYLOCK, and ATOMIC_UNLOCK are not tagged with their
- * source line numbers
+ /* THREAD* actions, ATOMIC_LOCK, ATOMIC_TRYLOCK, and ATOMIC_UNLOCK
+ * actions are not tagged with their source line numbers
*/
if (position == NULL)
- return NULL;
-
- if ( func_inst_map.contains(position) ) {
- FuncInst * inst = func_inst_map.get(position);
+ return;
- if (inst->get_type() != act->get_type() ) {
- // model_print("action with a different type occurs at line number %s\n", position);
- FuncInst * func_inst = inst->search_in_collision(act);
+ FuncInst * func_inst = func_inst_map.get(position);
- if (func_inst != NULL) {
- // return the FuncInst found in the collision list
- return func_inst;
- }
+ /* This position has not been inserted into hashtable before */
+ if (func_inst == NULL) {
+ func_inst = create_new_inst(act);
+ func_inst_map.put(position, func_inst);
+ return;
+ }
- func_inst = new FuncInst(act, this);
- inst->get_collisions()->push_back(func_inst);
- inst_list.push_back(func_inst); // delete?
+ /* Volatile variables that use ++ or -- syntax may result in read and write actions with the same position */
+ if (func_inst->get_type() != act->get_type()) {
+ FuncInst * collision_inst = func_inst->search_in_collision(act);
- return func_inst;
+ if (collision_inst == NULL) {
+ collision_inst = create_new_inst(act);
+ func_inst->add_to_collision(collision_inst);
+ return;
+ } else {
+ func_inst = collision_inst;
}
+ }
- return inst;
+ ASSERT(func_inst->get_type() == act->get_type());
+ int curr_execution_number = model->get_execution_number();
+
+ /* Reset locations when new executions start */
+ if (func_inst->get_execution_number() != curr_execution_number) {
+ func_inst->set_location(act->get_location());
+ func_inst->set_execution_number(curr_execution_number);
}
+ /* Mark the memory location of such inst as not unique */
+ if (func_inst->get_location() != act->get_location())
+ func_inst->not_single_location();
+}
+
+FuncInst * FuncNode::create_new_inst(ModelAction * act)
+{
FuncInst * func_inst = new FuncInst(act, this);
+ int exec_num = model->get_execution_number();
+ func_inst->set_execution_number(exec_num);
- func_inst_map.put(position, func_inst);
inst_list.push_back(func_inst);
return func_inst;
}
+
+/* Get the FuncInst with the same type, position, and location
+ * as act
+ *
+ * @return FuncInst with the same type, position, and location as act */
+FuncInst * FuncNode::get_inst(ModelAction *act)
+{
+ ASSERT(act);
+ const char * position = act->get_position();
+
+ /* THREAD* actions, ATOMIC_LOCK, ATOMIC_TRYLOCK, and ATOMIC_UNLOCK
+ * actions are not tagged with their source line numbers
+ */
+ if (position == NULL)
+ return NULL;
+
+ FuncInst * inst = func_inst_map.get(position);
+ if (inst == NULL)
+ return NULL;
+
+ action_type inst_type = inst->get_type();
+ action_type act_type = act->get_type();
+
+ if (inst_type == act_type) {
+ return inst;
+ }
+ /* RMWRCAS actions are converted to RMW or READ actions */
+ else if (inst_type == ATOMIC_RMWRCAS &&
+ (act_type == ATOMIC_RMW || act_type == ATOMIC_READ)) {
+ return inst;
+ }
+ /* Return the FuncInst in the collision list */
+ else {
+ return inst->search_in_collision(act);
+ }
+}
+
+
void FuncNode::add_entry_inst(FuncInst * inst)
{
if (inst == NULL)
return;
- mllnode<FuncInst*>* it;
- for (it = entry_insts.begin();it != NULL;it=it->getNext()) {
+ mllnode<FuncInst *> * it;
+ for (it = entry_insts.begin(); it != NULL; it = it->getNext()) {
if (inst == it->getVal())
return;
}
entry_insts.push_back(inst);
}
-/* @param inst_list a list of FuncInsts; this argument comes from ModelExecution
- * Link FuncInsts in a list - add one FuncInst to another's predecessors and successors
+/**
+ * @brief Convert ModelAction list to FuncInst list
+ * @param act_list A list of ModelActions
*/
-void FuncNode::link_insts(func_inst_list_t * inst_list)
+void FuncNode::update_tree(action_list_t * act_list)
+{
+ if (act_list == NULL || act_list->size() == 0)
+ return;
+
+ HashTable<void *, value_set_t *, uintptr_t, 0> * write_history = history->getWriteHistory();
+
+ /* build inst_list from act_list for later processing */
+ func_inst_list_t inst_list;
+ action_list_t rw_act_list;
+
+ for (sllnode<ModelAction *> * it = act_list->begin(); it != NULL; it = it->getNext()) {
+ ModelAction * act = it->getVal();
+ FuncInst * func_inst = get_inst(act);
+ void * loc = act->get_location();
+
+ if (func_inst == NULL)
+ continue;
+
+ inst_list.push_back(func_inst);
+ bool act_added = false;
+
+ if (act->is_write()) {
+ rw_act_list.push_back(act);
+ act_added = true;
+ if (!write_locations->contains(loc)) {
+ write_locations->add(loc);
+ history->update_loc_wr_func_nodes_map(loc, this);
+ }
+ }
+
+ if (act->is_read()) {
+ if (!act_added)
+ rw_act_list.push_back(act);
+
+ /* If func_inst may only read_from a single location, then:
+ *
+ * The first time an action reads from some location,
+ * import all the values that have been written to this
+ * location from ModelHistory and notify ModelHistory
+ * that this FuncNode may read from this location.
+ */
+ if (!read_locations->contains(loc) && func_inst->is_single_location()) {
+ read_locations->add(loc);
+ value_set_t * write_values = write_history->get(loc);
+ add_to_val_loc_map(write_values, loc);
+ history->update_loc_rd_func_nodes_map(loc, this);
+ }
+ }
+ }
+
+// model_print("function %s\n", func_name);
+// print_val_loc_map();
+
+ update_inst_tree(&inst_list);
+ update_predicate_tree(&rw_act_list);
+
+// print_predicate_tree();
+}
+
+/**
+ * @brief Link FuncInsts in inst_list - add one FuncInst to another's predecessors and successors
+ * @param inst_list A list of FuncInsts
+ */
+void FuncNode::update_inst_tree(func_inst_list_t * inst_list)
{
if (inst_list == NULL)
return;
+ else if (inst_list->size() == 0)
+ return;
+ /* start linking */
sllnode<FuncInst *>* it = inst_list->begin();
sllnode<FuncInst *>* prev;
- if (inst_list->size() == 0)
- return;
-
/* add the first instruction to the list of entry insts */
FuncInst * entry_inst = it->getVal();
add_entry_inst(entry_inst);
- it=it->getNext();
+ it = it->getNext();
while (it != NULL) {
- prev = it;
prev = it->getPrev();
FuncInst * prev_inst = prev->getVal();
prev_inst->add_succ(curr_inst);
curr_inst->add_pred(prev_inst);
- it=it->getNext();
+ it = it->getNext();
}
}
-/* @param tid thread id
- * Store the values read by atomic read actions into thrd_read_map */
-void FuncNode::store_read(ModelAction * act, uint32_t tid)
+void FuncNode::update_predicate_tree(action_list_t * act_list)
{
- ASSERT(act);
+ if (act_list == NULL || act_list->size() == 0)
+ return;
+
+ incr_marker();
+
+ /* Map a FuncInst to the its predicate */
+ HashTable<FuncInst *, Predicate *, uintptr_t, 0> inst_pred_map(128);
- void * location = act->get_location();
- uint64_t read_from_val = act->get_reads_from_value();
+ // Number FuncInsts to detect loops
+ HashTable<FuncInst *, uint32_t, uintptr_t, 0> inst_id_map(128);
+ uint32_t inst_counter = 0;
- /* resize and initialize */
- uint32_t old_size = thrd_read_map.size();
- if (old_size <= tid) {
- thrd_read_map.resize(tid + 1);
- for (uint32_t i = old_size;i < tid + 1;i++)
- thrd_read_map[i] = new read_map_t();
+ /* Only need to store the locations of read actions */
+ HashTable<void *, ModelAction *, uintptr_t, 0> loc_act_map(128);
+
+ sllnode<ModelAction *> *it = act_list->begin();
+ Predicate * curr_pred = predicate_tree_entry;
+ while (it != NULL) {
+ ModelAction * next_act = it->getVal();
+ FuncInst * next_inst = get_inst(next_act);
+ next_inst->set_associated_act(next_act, marker);
+
+ SnapVector<Predicate *> unset_predicates = SnapVector<Predicate *>();
+ bool branch_found = follow_branch(&curr_pred, next_inst, next_act, &unset_predicates);
+
+ // A branch with unset predicate expression is detected
+ if (!branch_found && unset_predicates.size() != 0) {
+ ASSERT(unset_predicates.size() == 1);
+ Predicate * one_branch = unset_predicates[0];
+
+ bool amended = amend_predicate_expr(&curr_pred, next_inst, next_act);
+ if (amended)
+ continue;
+ else {
+ curr_pred = one_branch;
+ branch_found = true;
+ }
+ }
+
+ // Detect loops
+ if (!branch_found && inst_id_map.contains(next_inst)) {
+ FuncInst * curr_inst = curr_pred->get_func_inst();
+ uint32_t curr_id = inst_id_map.get(curr_inst);
+ uint32_t next_id = inst_id_map.get(next_inst);
+
+ if (curr_id >= next_id) {
+ Predicate * old_pred = inst_pred_map.get(next_inst);
+ Predicate * back_pred = old_pred->get_parent();
+
+ curr_pred->add_backedge(back_pred);
+ curr_pred = back_pred;
+ continue;
+ }
+ }
+
+ // Generate new branches
+ if (!branch_found) {
+ SnapVector<struct half_pred_expr *> half_pred_expressions;
+ infer_predicates(next_inst, next_act, &loc_act_map, &half_pred_expressions);
+ generate_predicates(&curr_pred, next_inst, &half_pred_expressions);
+ continue;
+ }
+
+ if (next_act->is_write())
+ curr_pred->set_write(true);
+
+ if (next_act->is_read()) {
+ loc_act_map.put(next_act->get_location(), next_act);
+ }
+
+ inst_pred_map.put(next_inst, curr_pred);
+ if (!inst_id_map.contains(next_inst))
+ inst_id_map.put(next_inst, inst_counter++);
+
+ it = it->getNext();
+ curr_pred->incr_expl_count();
}
- read_map_t * read_map = thrd_read_map[tid];
- read_map->put(location, read_from_val);
+ curr_pred->set_exit(predicate_tree_exit);
+}
+
+/* Given curr_pred and next_inst, find the branch following curr_pred that
+ * contains next_inst and the correct predicate.
+ * @return true if branch found, false otherwise.
+ */
+bool FuncNode::follow_branch(Predicate ** curr_pred, FuncInst * next_inst,
+ ModelAction * next_act, SnapVector<Predicate *> * unset_predicates)
+{
+ /* Check if a branch with func_inst and corresponding predicate exists */
+ bool branch_found = false;
+ ModelVector<Predicate *> * branches = (*curr_pred)->get_children();
+ for (uint i = 0; i < branches->size(); i++) {
+ Predicate * branch = (*branches)[i];
+ if (branch->get_func_inst() != next_inst)
+ continue;
+
+ /* Check against predicate expressions */
+ bool predicate_correct = true;
+ PredExprSet * pred_expressions = branch->get_pred_expressions();
+ PredExprSetIter * pred_expr_it = pred_expressions->iterator();
+
+		/* Only read and rmw actions may have unset predicate expressions */
+ if (pred_expressions->getSize() == 0) {
+ predicate_correct = false;
+ unset_predicates->push_back(branch);
+ }
+
+ while (pred_expr_it->hasNext()) {
+ pred_expr * pred_expression = pred_expr_it->next();
+ uint64_t last_read, next_read;
+ bool equality;
+
+ switch(pred_expression->token) {
+ case NOPREDICATE:
+ predicate_correct = true;
+ break;
+ case EQUALITY:
+ FuncInst * to_be_compared;
+ ModelAction * last_act;
+
+ to_be_compared = pred_expression->func_inst;
+ last_act = to_be_compared->get_associated_act(marker);
+
+ last_read = last_act->get_reads_from_value();
+ next_read = next_act->get_reads_from_value();
+ equality = (last_read == next_read);
+ if (equality != pred_expression->value)
+ predicate_correct = false;
+
+ break;
+ case NULLITY:
+ next_read = next_act->get_reads_from_value();
+ equality = ((void*)next_read == NULL);
+ if (equality != pred_expression->value)
+ predicate_correct = false;
+ break;
+ default:
+ predicate_correct = false;
+ model_print("unkown predicate token\n");
+ break;
+ }
+ }
- /* Store the memory locations where atomic reads happen */
- bool push_loc = true;
- mllnode<void *> * it;
- for (it = read_locations.begin();it != NULL;it=it->getNext()) {
- if (location == it->getVal()) {
- push_loc = false;
+ if (predicate_correct) {
+ *curr_pred = branch;
+ branch_found = true;
break;
}
}
- if (push_loc)
- read_locations.push_back(location);
+ return branch_found;
}
-uint64_t FuncNode::query_last_read(void * location, uint32_t tid)
+/* Infer predicate expressions, which are generated in FuncNode::generate_predicates */
+void FuncNode::infer_predicates(FuncInst * next_inst, ModelAction * next_act,
+ HashTable<void *, ModelAction *, uintptr_t, 0> * loc_act_map,
+ SnapVector<struct half_pred_expr *> * half_pred_expressions)
{
- if (thrd_read_map.size() <= tid)
- return 0xdeadbeef;
+ void * loc = next_act->get_location();
+
+ if (next_inst->is_read()) {
+ /* read + rmw */
+ if ( loc_act_map->contains(loc) ) {
+ ModelAction * last_act = loc_act_map->get(loc);
+ FuncInst * last_inst = get_inst(last_act);
+ struct half_pred_expr * expression = new half_pred_expr(EQUALITY, last_inst);
+ half_pred_expressions->push_back(expression);
+ } else if ( next_inst->is_single_location() ){
+ loc_set_t * loc_may_equal = loc_may_equal_map->get(loc);
+
+ if (loc_may_equal != NULL) {
+ loc_set_iter * loc_it = loc_may_equal->iterator();
+ while (loc_it->hasNext()) {
+ void * neighbor = loc_it->next();
+ if (loc_act_map->contains(neighbor)) {
+ ModelAction * last_act = loc_act_map->get(neighbor);
+ FuncInst * last_inst = get_inst(last_act);
+
+ struct half_pred_expr * expression = new half_pred_expr(EQUALITY, last_inst);
+ half_pred_expressions->push_back(expression);
+ }
+ }
+ }
+ } else {
+ // next_inst is not single location
+ uint64_t read_val = next_act->get_reads_from_value();
+
+ // only infer NULLITY predicate when it is actually NULL.
+ if ( (void*)read_val == NULL) {
+ struct half_pred_expr * expression = new half_pred_expr(NULLITY, NULL);
+ half_pred_expressions->push_back(expression);
+ }
+ }
+ } else {
+ /* Pure writes */
+ // TODO: do anything here?
+ }
+}
+
+/* Able to generate complex predicates when there are multiple predicate expressions */
+void FuncNode::generate_predicates(Predicate ** curr_pred, FuncInst * next_inst,
+ SnapVector<struct half_pred_expr *> * half_pred_expressions)
+{
+ if (half_pred_expressions->size() == 0) {
+ Predicate * new_pred = new Predicate(next_inst);
+ (*curr_pred)->add_child(new_pred);
+ new_pred->set_parent(*curr_pred);
+
+ /* entry predicates and predicates containing pure write actions
+ * have no predicate expressions */
+ if ( (*curr_pred)->is_entry_predicate() )
+ new_pred->add_predicate_expr(NOPREDICATE, NULL, true);
+ else if (next_inst->is_write()) {
+ /* next_inst->is_write() <==> pure writes */
+ new_pred->add_predicate_expr(NOPREDICATE, NULL, true);
+ }
+
+ return;
+ }
+
+ SnapVector<Predicate *> predicates;
- read_map_t * read_map = thrd_read_map[tid];
+ struct half_pred_expr * half_expr = (*half_pred_expressions)[0];
+ predicates.push_back(new Predicate(next_inst));
+ predicates.push_back(new Predicate(next_inst));
- /* last read value not found */
- if ( !read_map->contains(location) )
- return 0xdeadbeef;
+ predicates[0]->add_predicate_expr(half_expr->token, half_expr->func_inst, true);
+ predicates[1]->add_predicate_expr(half_expr->token, half_expr->func_inst, false);
- uint64_t read_val = read_map->get(location);
- return read_val;
+ for (uint i = 1; i < half_pred_expressions->size(); i++) {
+ half_expr = (*half_pred_expressions)[i];
+
+ uint old_size = predicates.size();
+ for (uint j = 0; j < old_size; j++) {
+ Predicate * pred = predicates[j];
+ Predicate * new_pred = new Predicate(next_inst);
+ new_pred->copy_predicate_expr(pred);
+
+ pred->add_predicate_expr(half_expr->token, half_expr->func_inst, true);
+ new_pred->add_predicate_expr(half_expr->token, half_expr->func_inst, false);
+
+ predicates.push_back(new_pred);
+ }
+ }
+
+ for (uint i = 0; i < predicates.size(); i++) {
+ Predicate * pred= predicates[i];
+ (*curr_pred)->add_child(pred);
+ pred->set_parent(*curr_pred);
+ }
+
+ /* Free memories allocated by infer_predicate */
+ for (uint i = 0; i < half_pred_expressions->size(); i++) {
+ struct half_pred_expr * tmp = (*half_pred_expressions)[i];
+ snapshot_free(tmp);
+ }
}
-/* @param tid thread id
- * Reset read map for a thread. This function shall only be called
- * when a thread exits a function
- */
-void FuncNode::clear_read_map(uint32_t tid)
+/* Amend predicates that contain no predicate expressions. Currently only amend with NULLITY predicates */
+bool FuncNode::amend_predicate_expr(Predicate ** curr_pred, FuncInst * next_inst, ModelAction * next_act)
+{
+ // there should only be only child
+ Predicate * unset_pred = (*curr_pred)->get_children()->back();
+ uint64_t read_val = next_act->get_reads_from_value();
+
+ // only generate NULLITY predicate when it is actually NULL.
+ if ( !next_inst->is_single_location() && (void*)read_val == NULL ) {
+ Predicate * new_pred = new Predicate(next_inst);
+
+ (*curr_pred)->add_child(new_pred);
+ new_pred->set_parent(*curr_pred);
+
+ unset_pred->add_predicate_expr(NULLITY, NULL, false);
+ new_pred->add_predicate_expr(NULLITY, NULL, true);
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * @brief Record that value val has been observed at memory location loc.
+ *
+ * Locations that hold the same value may alias each other; before loc is
+ * inserted into the value's location set, every location already in that
+ * set is linked with loc in loc_may_equal_map (in both directions).
+ *
+ * @param val A value observed at loc
+ * @param loc The memory location where val was observed
+ */
+void FuncNode::add_to_val_loc_map(uint64_t val, void * loc)
+{
+	loc_set_t * locations = val_loc_map->get(val);
+
+	// First sighting of this value: lazily create its location set.
+	if (locations == NULL) {
+		locations = new loc_set_t();
+		val_loc_map->put(val, locations);
+	}
+
+	// Link loc with the locations already known for this value BEFORE
+	// inserting loc itself (update_loc_may_equal_map returns early when
+	// the set already contains the new location).
+	update_loc_may_equal_map(loc, locations);
+	locations->add(loc);
+	// values_may_read_from->add(val);
+}
+
+/**
+ * @brief Record a whole set of values for memory location loc.
+ *
+ * Convenience overload: invokes the single-value overload for every
+ * value in the set. A NULL set is treated as empty.
+ *
+ * @param values Set of values observed at loc (may be NULL)
+ * @param loc The memory location where the values were observed
+ */
+void FuncNode::add_to_val_loc_map(value_set_t * values, void * loc)
+{
+	if (values == NULL)
+		return;
+
+	value_set_iter * it = values->iterator();
+	while (it->hasNext()) {
+		uint64_t val = it->next();
+		add_to_val_loc_map(val, loc);
+	}
+}
+
+void FuncNode::update_loc_may_equal_map(void * new_loc, loc_set_t * old_locations)
{
- if (thrd_read_map.size() <= tid)
+ if ( old_locations->contains(new_loc) )
return;
- thrd_read_map[tid]->reset();
+ loc_set_t * neighbors = loc_may_equal_map->get(new_loc);
+
+ if (neighbors == NULL) {
+ neighbors = new loc_set_t();
+ loc_may_equal_map->put(new_loc, neighbors);
+ }
+
+ loc_set_iter * loc_it = old_locations->iterator();
+ while (loc_it->hasNext()) {
+ // new_loc: { old_locations, ... }
+ void * member = loc_it->next();
+ neighbors->add(member);
+
+ // for each i in old_locations, i : { new_loc, ... }
+ loc_set_t * _neighbors = loc_may_equal_map->get(member);
+ if (_neighbors == NULL) {
+ _neighbors = new loc_set_t();
+ loc_may_equal_map->put(member, _neighbors);
+ }
+ _neighbors->add(new_loc);
+ }
+}
+
+/* Every time a thread enters a function, set its position to the predicate tree entry */
+void FuncNode::init_predicate_tree_position(thread_id_t tid)
+{
+	int thread_id = id_to_int(tid);
+	// Grow the per-thread position vector on demand for new thread ids.
+	if (predicate_tree_position.size() <= (uint) thread_id)
+		predicate_tree_position.resize(thread_id + 1);
+
+	predicate_tree_position[thread_id] = predicate_tree_entry;
+}
+
+void FuncNode::set_predicate_tree_position(thread_id_t tid, Predicate * pred)
+{
+ int thread_id = id_to_int(tid);
+ predicate_tree_position[thread_id] = pred;
+}
+
+/* @return The position of a thread in a predicate tree */
+Predicate * FuncNode::get_predicate_tree_position(thread_id_t tid)
+{
+ int thread_id = id_to_int(tid);
+ return predicate_tree_position[thread_id];
+}
+
+/* Make sure elements of thrd_inst_act_map are initialized properly when threads enter functions */
+void FuncNode::init_inst_act_map(thread_id_t tid)
+{
+ int thread_id = id_to_int(tid);
+ SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+ uint old_size = thrd_inst_act_map->size();
+
+ if (thrd_inst_act_map->size() <= (uint) thread_id) {
+ uint new_size = thread_id + 1;
+ thrd_inst_act_map->resize(new_size);
+
+ for (uint i = old_size; i < new_size; i++)
+ (*thrd_inst_act_map)[i] = new inst_act_map_t(128);
+ }
+}
+
+/* Reset elements of thrd_inst_act_map when threads exit functions */
+void FuncNode::reset_inst_act_map(thread_id_t tid)
+{
+ int thread_id = id_to_int(tid);
+ SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+
+ inst_act_map_t * map = (*thrd_inst_act_map)[thread_id];
+ map->reset();
+}
+
+void FuncNode::update_inst_act_map(thread_id_t tid, ModelAction * read_act)
+{
+ int thread_id = id_to_int(tid);
+ SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+
+ inst_act_map_t * map = (*thrd_inst_act_map)[thread_id];
+ FuncInst * read_inst = get_inst(read_act);
+ map->put(read_inst, read_act);
}
-void FuncNode::generate_predicate(FuncInst *func_inst)
+inst_act_map_t * FuncNode::get_inst_act_map(thread_id_t tid)
{
+ int thread_id = id_to_int(tid);
+ SnapVector<inst_act_map_t *> * thrd_inst_act_map = history->getThrdInstActMap(func_id);
+ return (*thrd_inst_act_map)[thread_id];
}
-/* @param tid thread id
- * Print the values read by the last read actions for each memory location
+/* Add FuncNodes that this node may follow */
+void FuncNode::add_out_edge(FuncNode * other)
+{
+	// First edge between the pair: record a plain out-edge
+	if ( !edge_table.contains(other) ) {
+		edge_table.put(other, OUT_EDGE);
+		out_edges.push_back(other);
+		return;
+	}
+
+	// An in-edge from `other` already exists, so the relation is now
+	// bidirectional; upgrade it and add the out-edge exactly once.
+	edge_type_t edge = edge_table.get(other);
+	if (edge == IN_EDGE) {
+		edge_table.put(other, BI_EDGE);
+		out_edges.push_back(other);
+	}
+}
+
+/* Compute the distance between this FuncNode and the target node.
+ * Breadth-first search over out_edges, so the first time the target is
+ * seen the distance is minimal.
+ * Return -1 if the target node is unreachable or the actual distance
+ * is greater than max_step.
 */
-void FuncNode::print_last_read(uint32_t tid)
+int FuncNode::compute_distance(FuncNode * target, int max_step)
{
-	ASSERT(thrd_read_map.size() > tid);
-	read_map_t * read_map = thrd_read_map[tid];
+	if (target == NULL)
+		return -1;
+	else if (target == this)
+		return 0;
-	mllnode<void *> * it;
-	for (it = read_locations.begin();it != NULL;it=it->getNext()) {
-		if ( !read_map->contains(it->getVal()) )
-			break;
+	SnapList<FuncNode *> queue;
+	HashTable<FuncNode *, int, uintptr_t, 0> distances(128);
+
+	queue.push_back(this);
+	distances.put(this, 0);
+
+	while (!queue.empty()) {
+		FuncNode * curr = queue.front();
+		queue.pop_front();
+		int dist = distances.get(curr);
+
+		// Give up before expanding nodes at depth >= max_step
+		if (max_step <= dist)
+			return -1;
+
+		ModelList<FuncNode *> * outEdges = curr->get_out_edges();
+		mllnode<FuncNode *> * it;
+		for (it = outEdges->begin(); it != NULL; it = it->getNext()) {
+			FuncNode * out_node = it->getVal();
-		uint64_t read_val = read_map->get(it->getVal());
-		model_print("last read of thread %d at %p: 0x%x\n", tid, it->getVal(), read_val);
+			/* This node has not been visited before */
+			if ( !distances.contains(out_node) ) {
+				if (out_node == target)
+					return dist + 1;
+
+				queue.push_back(out_node);
+				distances.put(out_node, dist + 1);
+			}
+		}
+	}
+
+	/* Target node is unreachable */
+	return -1;
+}
+
+/* Dump this function's predicate tree as a Graphviz digraph via model_print */
+void FuncNode::print_predicate_tree()
+{
+	model_print("digraph function_%s {\n", func_name);
+	predicate_tree_entry->print_pred_subtree();
+	predicate_tree_exit->print_predicate();
+	model_print("}\n");	// end of graph
+}
+
+/* Debug dump of val_loc_map; body currently disabled (commented out) */
+void FuncNode::print_val_loc_map()
+{
+/*
+	value_set_iter * val_it = values_may_read_from->iterator();
+	while (val_it->hasNext()) {
+		uint64_t value = val_it->next();
+		model_print("val %llx: ", value);
+
+		loc_set_t * locations = val_loc_map->get(value);
+		loc_set_iter * loc_it = locations->iterator();
+		while (loc_it->hasNext()) {
+			void * location = loc_it->next();
+			model_print("%p ", location);
+		}
+		model_print("\n");
	}
+*/
}
#ifndef __FUNCNODE_H__
#define __FUNCNODE_H__
-#include "action.h"
-#include "funcinst.h"
-#include "hashtable.h"
+#include "hashset.h"
+#include "hashfunction.h"
+#include "classlist.h"
+#include "threads-model.h"
+
+#define MAX_DIST 10
typedef ModelList<FuncInst *> func_inst_list_mt;
-typedef HashTable<void *, uint64_t, uintptr_t, 4, model_malloc, model_calloc, model_free> read_map_t;
+/* Direction of the relation between two FuncNodes, as stored in edge_table;
+ * BI_EDGE means edges exist in both directions. */
+typedef enum edge_type {
+	IN_EDGE, OUT_EDGE, BI_EDGE
+} edge_type_t;
class FuncNode {
public:
-	FuncNode();
+	FuncNode(ModelHistory * history);
	~FuncNode();
	uint32_t get_func_id() { return func_id; }
	const char * get_func_name() { return func_name; }
	void set_func_id(uint32_t id) { func_id = id; }
	void set_func_name(const char * name) { func_name = name; }
+	void set_new_exec_flag();
-	FuncInst * get_or_add_action(ModelAction *act);
+	void add_inst(ModelAction *act);
+	FuncInst * create_new_inst(ModelAction *act);
+	FuncInst * get_inst(ModelAction *act);
	HashTable<const char *, FuncInst *, uintptr_t, 4, model_malloc, model_calloc, model_free> * getFuncInstMap() { return &func_inst_map; }
	func_inst_list_mt * get_inst_list() { return &inst_list; }
	func_inst_list_mt * get_entry_insts() { return &entry_insts; }
	void add_entry_inst(FuncInst * inst);
-	void link_insts(func_inst_list_t * inst_list);
-	void store_read(ModelAction * act, uint32_t tid);
-	uint64_t query_last_read(void * location, uint32_t tid);
-	void clear_read_map(uint32_t tid);
+	void update_tree(action_list_t * act_list);
+	void update_inst_tree(func_inst_list_t * inst_list);
+	void update_predicate_tree(action_list_t * act_list);
+	bool follow_branch(Predicate ** curr_pred, FuncInst * next_inst, ModelAction * next_act, SnapVector<Predicate *> * unset_predicates);
+
+	void incr_exit_count() { exit_count++; }
+	uint32_t get_exit_count() { return exit_count; }
+
+	SnapList<action_list_t *> * get_action_list_buffer() { return action_list_buffer; }
+
+	void add_to_val_loc_map(uint64_t val, void * loc);
+	void add_to_val_loc_map(value_set_t * values, void * loc);
+	void update_loc_may_equal_map(void * new_loc, loc_set_t * old_locations);
+
+	void init_predicate_tree_position(thread_id_t tid);
+	void set_predicate_tree_position(thread_id_t tid, Predicate * pred);
+	Predicate * get_predicate_tree_position(thread_id_t tid);
-	/* TODO: generate EQUALITY or NULLITY predicate based on write_history in history.cc */
-	void generate_predicate(FuncInst * func_inst);
+	void init_inst_act_map(thread_id_t tid);
+	void reset_inst_act_map(thread_id_t tid);
+	void update_inst_act_map(thread_id_t tid, ModelAction * read_act);
+	inst_act_map_t * get_inst_act_map(thread_id_t tid);
-	void print_last_read(uint32_t tid);
+	void add_out_edge(FuncNode * other);
+	ModelList<FuncNode *> * get_out_edges() { return &out_edges; }
+	int compute_distance(FuncNode * target, int max_step = MAX_DIST);
+
+	void print_predicate_tree();
+	void print_val_loc_map();
+	void print_last_read(thread_id_t tid);
	MEMALLOC
private:
	uint32_t func_id;
	const char * func_name;
+	ModelHistory * history;
+	Predicate * predicate_tree_entry;	// A dummy node whose children are the real entries
+	Predicate * predicate_tree_exit;	// A dummy node
+
+
+	uint32_t exit_count;
+	uint32_t marker;
+
+	void incr_marker() { marker++; }
	/* Use source line number as the key of hashtable, to check if
	 * atomic operation with this line number has been added or not
	 */
	HashTable<const char *, FuncInst *, uintptr_t, 4, model_malloc, model_calloc, model_free> func_inst_map;
-	/* list of all atomic actions in this function */
+	/* List of all atomic actions in this function */
	func_inst_list_mt inst_list;
-	/* possible entry atomic actions in this function */
+	/* Possible entry atomic actions in this function */
	func_inst_list_mt entry_insts;
-	/* Store the values read by atomic read actions per memory location for each thread */
-	ModelVector<read_map_t *> thrd_read_map;
-	ModelList<void *> read_locations;
+	void infer_predicates(FuncInst * next_inst, ModelAction * next_act, HashTable<void *, ModelAction *, uintptr_t, 0> * loc_act_map, SnapVector<struct half_pred_expr *> * half_pred_expressions);
+	void generate_predicates(Predicate ** curr_pred, FuncInst * next_inst, SnapVector<struct half_pred_expr *> * half_pred_expressions);
+	bool amend_predicate_expr(Predicate ** curr_pred, FuncInst * next_inst, ModelAction * next_act);
+
+	/* Store action_lists when calls to update_tree are deferred */
+	SnapList<action_list_t *> * action_list_buffer;
+
+	/* Set of locations read by this FuncNode */
+	loc_set_t * read_locations;
+
+	/* Set of locations written to by this FuncNode */
+	loc_set_t * write_locations;
+
+	/* Keeps track of locations that have the same values written to */
+	HashTable<uint64_t, loc_set_t *, uint64_t, 0, snapshot_malloc, snapshot_calloc, snapshot_free, int64_hash> * val_loc_map;
+
+	/* Keeps track of locations that may share the same value as key, deduced from val_loc_map */
+	HashTable<void *, loc_set_t *, uintptr_t, 0> * loc_may_equal_map;
+
+	// value_set_t * values_may_read_from;
+
+	/* Run-time position in the predicate tree for each thread */
+	ModelVector<Predicate *> predicate_tree_position;
+
+	/* Store the relation between this FuncNode and other FuncNodes */
+	HashTable<FuncNode *, edge_type_t, uintptr_t, 0, model_malloc, model_calloc, model_free> edge_table;
+
+	/* FuncNodes that follow this node */
+	ModelList<FuncNode *> out_edges;
};
#include <stdlib.h>
#include "threads-model.h"
#include "model.h"
+#include "action.h"
int Fuzzer::selectWrite(ModelAction *read, SnapVector<ModelAction *> * rf_set) {
int random_index = random() % rf_set->size();
waiters->erase(it);
return thread;
}
+
+/* Base-class policy: always allow a THREAD_SLEEP action to start sleeping */
+bool Fuzzer::shouldSleep(const ModelAction *sleep) {
+	return true;
+}
+
+/* Decide whether a sleeping thread's deadline has passed.
+ * A sleep action stores its creation time (get_time()) and its requested
+ * duration in nanoseconds (get_value()); wake once the monotonic clock
+ * has moved past creation-time + duration. */
+bool Fuzzer::shouldWake(const ModelAction *sleep) {
+	struct timespec currtime;
+	clock_gettime(CLOCK_MONOTONIC, &currtime);
+	// Widen tv_sec to 64 bits BEFORE multiplying: on targets where
+	// time_t/long is 32-bit, tv_sec * 1000000000 overflows otherwise.
+	uint64_t lcurrtime = (uint64_t) currtime.tv_sec * 1000000000ULL + currtime.tv_nsec;
+
+	return ((sleep->get_time()+sleep->get_value()) < lcurrtime);
+}
#include "classlist.h"
#include "mymemory.h"
#include "stl-model.h"
+#include "threads-model.h"
+/* Fuzzer becomes an abstract interface here: scheduling hooks are virtual so
+ * NewFuzzer can override them. NOTE(review): polymorphic base without a
+ * virtual destructor — deleting a derived fuzzer through a Fuzzer* would be
+ * UB; confirm fuzzers are never destroyed that way. */
class Fuzzer {
public:
	Fuzzer() {}
-	int selectWrite(ModelAction *read, SnapVector<ModelAction *>* rf_set);
-	Thread * selectThread(int * threadlist, int numthreads);
+	virtual int selectWrite(ModelAction *read, SnapVector<ModelAction *>* rf_set);
+	virtual bool has_paused_threads() { return false; }
+	virtual void notify_paused_thread(Thread * thread) = 0;
+	virtual Thread * selectThread(int * threadlist, int numthreads);
+
	Thread * selectNotify(action_list_t * waiters);
-	MEMALLOC
+	bool shouldSleep(const ModelAction *sleep);
+	bool shouldWake(const ModelAction *sleep);
+	virtual bool shouldWait(const ModelAction *wait) = 0;
+	virtual void register_engine(ModelHistory * history, ModelExecution * execution) = 0;
+	SNAPSHOTALLOC
private:
};
#endif
--- /dev/null
+#include "hashfunction.h"
+
+/**
+ * Hash function for 64-bit integers
+ * (Thomas Wang's 64-bit to 32-bit mix: alternating add/xor-shift rounds)
+ * https://gist.github.com/badboy/6267743#64-bit-to-32-bit-hash-functions
+ */
+unsigned int int64_hash(uint64_t key) {
+	key = (~key) + (key << 18);	// key = (key << 18) - key - 1;
+	key = key ^ (key >> 31);
+	key = key * 21;	// key = (key + (key << 2)) + (key << 4);
+	key = key ^ (key >> 11);
+	key = key + (key << 6);
+	key = key ^ (key >> 22);
+	return (unsigned int) key;
+}
--- /dev/null
+#ifndef __HASH_FUNCTION__
+#define __HASH_FUNCTION__
+
+#include <stdint.h>
+
+unsigned int int64_hash(uint64_t key);
+
+#endif
LinkNode<_Key> *next;
};
-template<typename _Key, typename _KeyInt, int _Shift, void *
- (* _malloc)(size_t), void * (* _calloc)(size_t, size_t), void (*_free)(void *), unsigned int (*hash_function
- )(_Key), bool (*equals)(_Key, _Key)>
+template<typename _Key, typename _KeyInt, int _Shift, void * (* _malloc)(size_t), void * (* _calloc)(size_t, size_t), void (*_free)(void *), unsigned int (*hash_function)(_Key), bool (*equals)(_Key, _Key)>
class HashSet;
-template<typename _Key, typename _KeyInt, int _Shift, void * (* _malloc)(size_t), void * (* _calloc)(size_t, size_t), void (*_free)(void *), unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
+template<typename _Key, typename _KeyInt, int _Shift, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
class HSIterator {
public:
HSIterator(LinkNode<_Key> *_curr, HashSet <_Key, _KeyInt, _Shift, _malloc, _calloc, _free, hash_function, equals> * _set) :
return size;
}
+ bool isEmpty() {
+ return size == 0;
+ }
/**
* @brief Check whether the table contains a value for the given key
#include "history.h"
#include "action.h"
#include "funcnode.h"
+#include "funcinst.h"
#include "common.h"
+#include "concretepredicate.h"
+#include "waitobj.h"
#include "model.h"
#include "execution.h"
-
+#include "newfuzzer.h"
/** @brief Constructor */
ModelHistory::ModelHistory() :
	func_counter(1),	/* function id starts with 1 */
	func_map(),
	func_map_rev(),
-	func_nodes(),
-	write_history()
-{}
+	func_nodes()
+{
+	/* The following are snapshot data structures */
+	write_history = new HashTable<void *, value_set_t *, uintptr_t, 0>();
+	loc_rd_func_nodes_map = new HashTable<void *, SnapVector<FuncNode *> *, uintptr_t, 0>();
+	loc_wr_func_nodes_map = new HashTable<void *, SnapVector<FuncNode *> *, uintptr_t, 0>();
+	loc_waiting_writes_map = new HashTable<void *, SnapVector<ConcretePredicate *> *, uintptr_t, 0>();
+	thrd_waiting_write = new SnapVector<ConcretePredicate *>();
+	thrd_wait_obj = new SnapVector<WaitObj *>();
+	func_inst_act_maps = new HashTable<uint32_t, SnapVector<inst_act_map_t *> *, int, 0>(128);
+}
+
+ModelHistory::~ModelHistory()
+{
+	// TODO: complete destructor; maybe not needed.
+	// Only WaitObjs are freed here; the tables allocated in the constructor
+	// are presumably reclaimed with the snapshot heap — confirm.
+	for (uint i = 0; i < thrd_wait_obj->size(); i++)
+		delete (*thrd_wait_obj)[i];
+}
+/* Called when thread tid enters instrumented function func_id: grow the
+ * per-thread bookkeeping, open a fresh action list for this call, position
+ * the thread at the predicate tree entry, and record the caller->callee
+ * edge between FuncNodes. */
void ModelHistory::enter_function(const uint32_t func_id, thread_id_t tid)
{
	//model_print("thread %d entering func %d\n", tid, func_id);
-	uint32_t id = id_to_int(tid);
-	SnapVector<func_id_list_t> * thrd_func_list = model->get_execution()->get_thrd_func_list();
-	SnapVector< SnapList<func_inst_list_t *> *> *
-		thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
+	ModelExecution * execution = model->get_execution();
+	uint id = id_to_int(tid);
+
+	SnapVector<func_id_list_t> * thrd_func_list = execution->get_thrd_func_list();
+	SnapVector< SnapList<action_list_t *> *> *
+		thrd_func_act_lists = execution->get_thrd_func_act_lists();
+	SnapVector<uint32_t> * thrd_last_entered_func = execution->get_thrd_last_entered_func();
	if ( thrd_func_list->size() <= id ) {
		uint oldsize = thrd_func_list->size();
		thrd_func_list->resize( id + 1 );
-		for(uint i=oldsize;i<id+1;i++) {
-			new(&(*thrd_func_list)[i]) func_id_list_t();
-		}
-		thrd_func_inst_lists->resize( id + 1 );
-	}
+		thrd_func_act_lists->resize( id + 1 );
-	SnapList<func_inst_list_t *> * func_inst_lists = thrd_func_inst_lists->at(id);
+		for (uint i = oldsize; i < id + 1; i++) {
+			// push 0 as a dummy function id to avoid a seg fault
+			new (&(*thrd_func_list)[i]) func_id_list_t();
+			(*thrd_func_list)[i].push_back(0);
-	if (func_inst_lists == NULL) {
-		func_inst_lists = new SnapList< func_inst_list_t *>();
-		thrd_func_inst_lists->at(id) = func_inst_lists;
+			(*thrd_func_act_lists)[i] = new SnapList<action_list_t *>();
+			thrd_last_entered_func->push_back(0);
+		}
	}
+	SnapList<action_list_t *> * func_act_lists = (*thrd_func_act_lists)[id];
+	func_act_lists->push_back( new action_list_t() );
+
+	uint32_t last_entered_func_id = (*thrd_last_entered_func)[id];
+	(*thrd_last_entered_func)[id] = func_id;
	(*thrd_func_list)[id].push_back(func_id);
-	func_inst_lists->push_back( new func_inst_list_t() );
	if ( func_nodes.size() <= func_id )
		resize_func_nodes( func_id + 1 );
+
+	FuncNode * func_node = func_nodes[func_id];
+	func_node->init_predicate_tree_position(tid);
+	func_node->init_inst_act_map(tid);
+
+	/* Add edges between FuncNodes */
+	if (last_entered_func_id != 0) {
+		FuncNode * last_func_node = func_nodes[last_entered_func_id];
+		last_func_node->add_out_edge(func_node);
+	}
+
+	/* Monitor the statuses of threads waiting for tid */
+	monitor_waiting_thread(func_id, tid);
}
/* @param func_id a non-zero value */
void ModelHistory::exit_function(const uint32_t func_id, thread_id_t tid)
{
+	ModelExecution * execution = model->get_execution();
	uint32_t id = id_to_int(tid);
-	SnapVector<func_id_list_t> * thrd_func_list = model->get_execution()->get_thrd_func_list();
-	SnapVector< SnapList<func_inst_list_t *> *> *
-		thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
+	SnapVector<func_id_list_t> * thrd_func_list = execution->get_thrd_func_list();
+	SnapVector< SnapList<action_list_t *> *> *
+		thrd_func_act_lists = execution->get_thrd_func_act_lists();
-	SnapList<func_inst_list_t *> * func_inst_lists = thrd_func_inst_lists->at(id);
+	SnapList<action_list_t *> * func_act_lists = (*thrd_func_act_lists)[id];
	uint32_t last_func_id = (*thrd_func_list)[id].back();
	if (last_func_id == func_id) {
		FuncNode * func_node = func_nodes[func_id];
-		func_node->clear_read_map(tid);
-
-		func_inst_list_t * curr_inst_list = func_inst_lists->back();
-		func_node->link_insts(curr_inst_list);
+		func_node->set_predicate_tree_position(tid, NULL);
+		func_node->reset_inst_act_map(tid);
+
+		action_list_t * curr_act_list = func_act_lists->back();
+
+		/* defer the processing of curr_act_list until the function has exited a few times
+		 * (currently twice) so that more information can be gathered to infer nullity predicates.
+		 */
+		func_node->incr_exit_count();
+		if (func_node->get_exit_count() >= 2) {
+			// Flush any buffered lists first so the tree sees calls in order
+			SnapList<action_list_t *> * action_list_buffer = func_node->get_action_list_buffer();
+			while (action_list_buffer->size() > 0) {
+				action_list_t * act_list = action_list_buffer->back();
+				action_list_buffer->pop_back();
+				func_node->update_tree(act_list);
+			}
+
+			func_node->update_tree(curr_act_list);
+		} else
+			func_node->get_action_list_buffer()->push_front(curr_act_list);
		(*thrd_func_list)[id].pop_back();
-		func_inst_lists->pop_back();
+		func_act_lists->pop_back();
	} else {
		model_print("trying to exit with a wrong function id\n");
		model_print("--- last_func: %d, func_id: %d\n", last_func_id, func_id);
if ( old_size < new_size )
func_nodes.resize(new_size);
- for (uint32_t id = old_size;id < new_size;id++) {
+ for (uint32_t id = old_size; id < new_size; id++) {
const char * func_name = func_map_rev[id];
- FuncNode * func_node = new FuncNode();
+ FuncNode * func_node = new FuncNode(this);
func_node->set_func_id(id);
func_node->set_func_name(func_name);
func_nodes[id] = func_node;
+/* Main per-action hook: record writes in the global write history, wake any
+ * threads whose waited-for predicate this write satisfies, and append the
+ * action to the current function's action list for predicate-tree learning. */
void ModelHistory::process_action(ModelAction *act, thread_id_t tid)
{
-	/* return if thread i has not entered any function or has exited
-	   from all functions */
-	SnapVector<func_id_list_t> * thrd_func_list = model->get_execution()->get_thrd_func_list();
-	SnapVector< SnapList<func_inst_list_t *> *> *
-		thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
-
-	uint32_t id = id_to_int(tid);
-	if ( thrd_func_list->size() <= id )
+	ModelExecution * execution = model->get_execution();
+	SnapVector<func_id_list_t> * thrd_func_list = execution->get_thrd_func_list();
+	SnapVector< SnapList<action_list_t *> *> *
+		thrd_func_act_lists = execution->get_thrd_func_act_lists();
+
+	uint32_t thread_id = id_to_int(tid);
+	/* Return if thread tid has not entered any function that contains atomics */
+	if ( thrd_func_list->size() <= thread_id )
		return;
-	/* get the function id that thread i is currently in */
-	uint32_t func_id = (*thrd_func_list)[id].back();
-	SnapList<func_inst_list_t *> * func_inst_lists = thrd_func_inst_lists->at(id);
+	/* Monitor the statuses of threads waiting for tid */
+	monitor_waiting_thread_counter(tid);
+
+	/* Every write action should be processed, including
+	 * nonatomic writes (which have no position) */
+	if (act->is_write()) {
+		void * location = act->get_location();
+		uint64_t value = act->get_write_value();
+		update_write_history(location, value);
+
+		/* Notify FuncNodes that may read from this location */
+		SnapVector<FuncNode *> * func_node_list = getRdFuncNodes(location);
+		for (uint i = 0; i < func_node_list->size(); i++) {
+			FuncNode * func_node = (*func_node_list)[i];
+			func_node->add_to_val_loc_map(value, location);
+		}
-	if ( func_nodes.size() <= func_id )
-		resize_func_nodes( func_id + 1 );
+		check_waiting_write(act);
+	}
-	FuncNode * func_node = func_nodes[func_id];
-	ASSERT (func_node != NULL);
+	uint32_t func_id = (*thrd_func_list)[thread_id].back();
+
+	/* The following does not care about actions that are not inside
+	 * any function that contains atomics or actions without a position */
+	if (func_id == 0 || act->get_position() == NULL)
+		return;
-	/* add corresponding FuncInst to func_node */
-	FuncInst * inst = func_node->get_or_add_action(act);
+	SnapList<action_list_t *> * func_act_lists = (*thrd_func_act_lists)[thread_id];
-	if (inst == NULL)
+	/* The list of actions that thread tid has taken in its current function */
+	action_list_t * curr_act_list = func_act_lists->back();
+
+	if (skip_action(act, curr_act_list))
		return;
-	// if (inst->is_read())
-	//	func_node->store_read(act, tid);
+	/* Add to curr_inst_list */
+	curr_act_list->push_back(act);
-	if (inst->is_write())
-		add_to_write_history(act->get_location(), act->get_write_value());
+	FuncNode * func_node = func_nodes[func_id];
+	func_node->add_inst(act);
-	/* add to curr_inst_list */
-	func_inst_list_t * curr_inst_list = func_inst_lists->back();
-	ASSERT(curr_inst_list != NULL);
-	curr_inst_list->push_back(inst);
+	if (act->is_read()) {
+		func_node->update_inst_act_map(tid, act);
+	}
}
-/* return the FuncNode given its func_id */
+/* Return the FuncNode given its func_id; NULL for the reserved id 0 or
+ * for ids that have not been registered yet */
FuncNode * ModelHistory::get_func_node(uint32_t func_id)
{
-	if (func_nodes.size() <= func_id)	// this node has not been added
+	if (func_id == 0)
+		return NULL;
+
+	// This node has not been added to func_nodes
+	if (func_nodes.size() <= func_id)
		return NULL;
	return func_nodes[func_id];
}
-uint64_t ModelHistory::query_last_read(void * location, thread_id_t tid)
+/* Return the current FuncNode when given a thread id */
+FuncNode * ModelHistory::get_curr_func_node(thread_id_t tid)
{
-	SnapVector<func_id_list_t> * thrd_func_list = model->get_execution()->get_thrd_func_list();
-	uint32_t id = id_to_int(tid);
-
-	ASSERT( thrd_func_list->size() > id );
-	uint32_t func_id = (*thrd_func_list)[id].back();
-	FuncNode * func_node = func_nodes[func_id];
+	int thread_id = id_to_int(tid);
+	SnapVector<func_id_list_t> * thrd_func_list = model->get_execution()->get_thrd_func_list();
+	uint32_t func_id = (*thrd_func_list)[thread_id].back();
-	uint64_t last_read_val = 0xdeadbeef;
-	if (func_node != NULL) {
-		last_read_val = func_node->query_last_read(location, tid);
+	if (func_id != 0) {
+		return func_nodes[func_id];
	}
-	return last_read_val;
+	return NULL;
}
-void ModelHistory::add_to_write_history(void * location, uint64_t write_val)
+/* Add write_val to the set of values ever written to location,
+ * creating the per-location set on first use */
+void ModelHistory::update_write_history(void * location, uint64_t write_val)
{
-	if ( !write_history.contains(location) )
-		write_history.put(location, new write_set_t() );
+	value_set_t * write_set = write_history->get(location);
+
+	if (write_set == NULL) {
+		write_set = new value_set_t();
+		write_history->put(location, write_set);
+	}
-	write_set_t * write_set = write_history.get(location);
	write_set->add(write_val);
}
-void ModelHistory::print()
+/* Register `node` as a reader of `location`.
+ * NOTE(review): appends unconditionally — duplicates are possible if called
+ * twice for the same (location, node) pair; confirm callers deduplicate. */
+void ModelHistory::update_loc_rd_func_nodes_map(void * location, FuncNode * node)
+{
+	SnapVector<FuncNode *> * func_node_list = getRdFuncNodes(location);
+	func_node_list->push_back(node);
+}
+
+/* Register `node` as a writer of `location` (same caveat as above) */
+void ModelHistory::update_loc_wr_func_nodes_map(void * location, FuncNode * node)
+{
+	SnapVector<FuncNode *> * func_node_list = getWrFuncNodes(location);
+	func_node_list->push_back(node);
+}
+
+/* Lazily create and return the list of FuncNodes that read from location */
+SnapVector<FuncNode *> * ModelHistory::getRdFuncNodes(void * location)
+{
+	SnapVector<FuncNode *> * func_node_list = loc_rd_func_nodes_map->get(location);
+	if (func_node_list == NULL) {
+		func_node_list = new SnapVector<FuncNode *>();
+		loc_rd_func_nodes_map->put(location, func_node_list);
+	}
+
+	return func_node_list;
+}
+
+/* Lazily create and return the list of FuncNodes that write to location */
+SnapVector<FuncNode *> * ModelHistory::getWrFuncNodes(void * location)
+{
+	SnapVector<FuncNode *> * func_node_list = loc_wr_func_nodes_map->get(location);
+	if (func_node_list == NULL) {
+		func_node_list = new SnapVector<FuncNode *>();
+		loc_wr_func_nodes_map->put(location, func_node_list);
+	}
+
+	return func_node_list;
+}
+
+/* When a thread is paused by Fuzzer, keep track of the condition it is waiting for */
+void ModelHistory::add_waiting_write(ConcretePredicate * concrete)
+{
+	void * location = concrete->get_location();
+	SnapVector<ConcretePredicate *> * waiting_conditions = loc_waiting_writes_map->get(location);
+	if (waiting_conditions == NULL) {
+		waiting_conditions = new SnapVector<ConcretePredicate *>();
+		loc_waiting_writes_map->put(location, waiting_conditions);
+	}
+
+	/* waiting_conditions should not have duplications
+	 * (invariant assumed by callers; not enforced here — TODO confirm) */
+	waiting_conditions->push_back(concrete);
+
+	int thread_id = id_to_int(concrete->get_tid());
+	if (thrd_waiting_write->size() <= (uint) thread_id) {
+		thrd_waiting_write->resize(thread_id + 1);
+	}
+
+	// One waiting condition per thread, indexed by thread id
+	(*thrd_waiting_write)[thread_id] = concrete;
+}
+
+/* Drop and free thread tid's waiting condition, removing it from the
+ * per-location list with a swap-with-last erase */
+void ModelHistory::remove_waiting_write(thread_id_t tid)
+{
+	ConcretePredicate * concrete = (*thrd_waiting_write)[ id_to_int(tid) ];
+	void * location = concrete->get_location();
+	SnapVector<ConcretePredicate *> * concrete_preds = loc_waiting_writes_map->get(location);
+
+	/* Linear search should be fine because presumably not many ConcretePredicates
+	 * are at the same memory location */
+	for (uint i = 0; i < concrete_preds->size(); i++) {
+		ConcretePredicate * current = (*concrete_preds)[i];
+		if (concrete == current) {
+			(*concrete_preds)[i] = concrete_preds->back();
+			concrete_preds->pop_back();
+			break;
+		}
+	}
+
+	int thread_id = id_to_int( concrete->get_tid() );
+	(*thrd_waiting_write)[thread_id] = NULL;
+	delete concrete;
+}
+
+/* Check if any other thread is waiting for this write action. If so, "notify" them.
+ *
+ * For every ConcretePredicate registered at the written location, evaluate
+ * each of its expressions against the written value; if all hold, wake the
+ * owning thread via the fuzzer. Entries are not removed here — the woken
+ * thread clears its own entry through remove_waiting_write(). */
+void ModelHistory::check_waiting_write(ModelAction * write_act)
+{
+	void * location = write_act->get_location();
+	uint64_t value = write_act->get_write_value();
+	SnapVector<ConcretePredicate *> * concrete_preds = loc_waiting_writes_map->get(location);
+	if (concrete_preds == NULL)
+		return;
+
+	uint index = 0;
+	while (index < concrete_preds->size()) {
+		ConcretePredicate * concrete_pred = (*concrete_preds)[index];
+		SnapVector<struct concrete_pred_expr> * concrete_exprs = concrete_pred->getExpressions();
+		bool satisfy_predicate = true;
+		/* Check if the written value satisfies every predicate expression */
+		for (uint i = 0; i < concrete_exprs->size(); i++) {
+			struct concrete_pred_expr concrete = (*concrete_exprs)[i];
+			bool equality = false;
+			switch (concrete.token) {
+			case EQUALITY:
+				equality = (value == concrete.value);
+				break;
+			case NULLITY:
+				equality = ((void*)value == NULL);
+				break;
+			default:
+				/* fix: terminate the diagnostic with a newline like every
+				 * other model_print message */
+				model_print("unknown predicate token\n");
+				break;
+			}
+
+			if (equality != concrete.equality) {
+				satisfy_predicate = false;
+				break;
+			}
+		}
+
+		if (satisfy_predicate) {
+			/* Wake up threads */
+			thread_id_t tid = concrete_pred->get_tid();
+			Thread * thread = model->get_thread(tid);
+
+			//model_print("** thread %d is woken up\n", thread->get_id());
+			model->get_execution()->getFuzzer()->notify_paused_thread(thread);
+		}
+
+		index++;
+	}
+}
+
+/* Lazily create and return the WaitObj for thread tid, growing the
+ * per-thread vector (and filling every new slot) on demand */
+WaitObj * ModelHistory::getWaitObj(thread_id_t tid)
+{
+	int thread_id = id_to_int(tid);
+	int old_size = thrd_wait_obj->size();
+	if (old_size <= thread_id) {
+		thrd_wait_obj->resize(thread_id + 1);
+		for (int i = old_size; i < thread_id + 1; i++) {
+			(*thrd_wait_obj)[i] = new WaitObj( int_to_id(i) );
+		}
+	}
+
+	return (*thrd_wait_obj)[thread_id];
+}
+
+/* Record that self_id waits for waiting_for_id to reach target_node,
+ * keeping the symmetric waited-by relation in sync */
+void ModelHistory::add_waiting_thread(thread_id_t self_id,
+																			thread_id_t waiting_for_id, FuncNode * target_node, int dist)
+{
+	WaitObj * self_wait_obj = getWaitObj(self_id);
+	self_wait_obj->add_waiting_for(waiting_for_id, target_node, dist);
+
+	/* Update waited-by relation */
+	WaitObj * other_wait_obj = getWaitObj(waiting_for_id);
+	other_wait_obj->add_waited_by(self_id);
+}
+
+/* Thread tid is woken up (or notified), so it is not waiting for others anymore */
+void ModelHistory::remove_waiting_thread(thread_id_t tid)
+{
+	WaitObj * self_wait_obj = getWaitObj(tid);
+	thrd_id_set_t * waiting_for = self_wait_obj->getWaitingFor();
+
+	/* Remove tid from waited_by's */
+	thrd_id_set_iter * iter = waiting_for->iterator();
+	while (iter->hasNext()) {
+		thread_id_t other_id = iter->next();
+		WaitObj * other_wait_obj = getWaitObj(other_id);
+		other_wait_obj->remove_waited_by(tid);
+	}
+
+	self_wait_obj->clear_waiting_for();
+}
+
+/* self_id gives up waiting for waiting_for_id to reach target_node; if that
+ * leaves self_id waiting for nobody, wake it up via the fuzzer */
+void ModelHistory::stop_waiting_for_node(thread_id_t self_id,
+																				 thread_id_t waiting_for_id, FuncNode * target_node)
+{
+	WaitObj * self_wait_obj = getWaitObj(self_id);
+	bool thread_removed = self_wait_obj->remove_waiting_for_node(waiting_for_id, target_node);
+
+	// model_print("\t%d gives up %d on node %d\n", self_id, waiting_for_id, target_node->get_func_id());
+
+	/* If thread self_id is not waiting for waiting_for_id anymore */
+	if (thread_removed) {
+		WaitObj * other_wait_obj = getWaitObj(waiting_for_id);
+		other_wait_obj->remove_waited_by(self_id);
+
+		thrd_id_set_t * self_waiting_for = self_wait_obj->getWaitingFor();
+		if ( self_waiting_for->isEmpty() ) {
+			// model_print("\tthread %d waits for nobody, wake up\n", self_id);
+			ModelExecution * execution = model->get_execution();
+			Thread * thread = execution->get_thread(self_id);
+			execution->getFuzzer()->notify_paused_thread(thread);
+		}
+	}
+}
+
+/* Lazily create and return the per-thread FuncInst->ModelAction maps for
+ * function func_id (id 0 is reserved and therefore rejected) */
+SnapVector<inst_act_map_t *> * ModelHistory::getThrdInstActMap(uint32_t func_id)
+{
+	ASSERT(func_id != 0);
+
+	SnapVector<inst_act_map_t *> * maps = func_inst_act_maps->get(func_id);
+	if (maps == NULL) {
+		maps = new SnapVector<inst_act_map_t *>();
+		func_inst_act_maps->put(func_id, maps);
+	}
+
+	return maps;
+}
+
+/* Decide whether an action should be excluded from the current function's
+ * action list (and hence from predicate-tree learning) */
+bool ModelHistory::skip_action(ModelAction * act, SnapList<ModelAction *> * curr_act_list)
+{
+	ASSERT(curr_act_list != NULL);
+
+	bool second_part_of_rmw = act->is_rmwc() || act->is_rmw();
+	modelclock_t curr_seq_number = act->get_seq_number();
+
+	/* Skip actions that are second part of a read modify write */
+	if (second_part_of_rmw)
+		return true;
+
+	/* Skip actions with the same sequence number */
+	if (curr_act_list->size() != 0) {
+		ModelAction * last_act = curr_act_list->back();
+		if (last_act->get_seq_number() == curr_seq_number)
+			return true;
+	}
+
+	/* Skip actions that are paused by fuzzer (sequence number is 0) */
+	if (curr_seq_number == 0)
+		return true;
+
+	return false;
+}
+
+/* Monitor thread tid and decide whether other threads (that are waiting for tid)
+ * should keep waiting for this thread or not. Shall only be called when a thread
+ * enters a function.
+ *
+ * Heuristics: If the distance from the current FuncNode to some target node
+ * ever increases, stop waiting for this thread on this target node.
+ */
+void ModelHistory::monitor_waiting_thread(uint32_t func_id, thread_id_t tid)
+{
+	WaitObj * wait_obj = getWaitObj(tid);
+	thrd_id_set_t * waited_by = wait_obj->getWaitedBy();
+	FuncNode * curr_node = func_nodes[func_id];
+
+	/* For each thread waiting for tid */
+	thrd_id_set_iter * tid_iter = waited_by->iterator();
+	while (tid_iter->hasNext()) {
+		thread_id_t waited_by_id = tid_iter->next();
+		WaitObj * other_wait_obj = getWaitObj(waited_by_id);
+
+		node_set_t * target_nodes = other_wait_obj->getTargetNodes(tid);
+		node_set_iter * node_iter = target_nodes->iterator();
+		while (node_iter->hasNext()) {
+			FuncNode * target = node_iter->next();
+			int old_dist = other_wait_obj->lookup_dist(tid, target);
+			// compute_distance bounded by old_dist: -1 means tid moved
+			// no closer to (or out of reach of) the target
+			int new_dist = curr_node->compute_distance(target, old_dist);
+
+			if (new_dist == -1) {
+				stop_waiting_for_node(waited_by_id, tid, target);
+			}
+		}
+	}
+}
+
+/* Per-action counterpart of the above: each action by tid bumps a counter in
+ * every waiter; a waiter whose counter expires gives up on tid entirely and
+ * is woken if it then waits for nobody */
+void ModelHistory::monitor_waiting_thread_counter(thread_id_t tid)
+{
+	WaitObj * wait_obj = getWaitObj(tid);
+	thrd_id_set_t * waited_by = wait_obj->getWaitedBy();
+
+	// Thread tid has taken an action, update the counter for threads waiting for tid
+	thrd_id_set_iter * tid_iter = waited_by->iterator();
+	while (tid_iter->hasNext()) {
+		thread_id_t waited_by_id = tid_iter->next();
+		WaitObj * other_wait_obj = getWaitObj(waited_by_id);
+
+		bool expire = other_wait_obj->incr_counter(tid);
+		if (expire) {
+//			model_print("thread %d stops waiting for thread %d\n", waited_by_id, tid);
+			wait_obj->remove_waited_by(waited_by_id);
+			other_wait_obj->remove_waiting_for(tid);
+
+			thrd_id_set_t * other_waiting_for = other_wait_obj->getWaitingFor();
+			if ( other_waiting_for->isEmpty() ) {
+				// model_print("\tthread %d waits for nobody, wake up\n", self_id);
+				ModelExecution * execution = model->get_execution();
+				Thread * thread = execution->get_thread(waited_by_id);
+				execution->getFuzzer()->notify_paused_thread(thread);
+			}
+		}
+	}
+}
+
+/* Reallocate some snapshotted memories when new executions start */
+void ModelHistory::set_new_exec_flag()
+{
+	// Index 0 is the reserved dummy id, so start at 1
+	for (uint i = 1; i < func_nodes.size(); i++) {
+		FuncNode * func_node = func_nodes[i];
+		func_node->set_new_exec_flag();
+	}
+}
+
+/* Dump the FuncNode call graph as a Graphviz digraph via model_print */
+void ModelHistory::dump_func_node_graph()
+{
+	model_print("digraph func_node_graph {\n");
+	for (uint i = 1; i < func_nodes.size(); i++) {
+		FuncNode * node = func_nodes[i];
+		ModelList<FuncNode *> * out_edges = node->get_out_edges();
+
+		model_print("\"%p\" [label=\"%s\"]\n", node, node->get_func_name());
+		mllnode<FuncNode *> * it;
+		for (it = out_edges->begin(); it != NULL; it = it->getNext()) {
+			FuncNode * other = it->getVal();
+			model_print("\"%p\" -> \"%p\"\n", node, other);
+		}
+	}
+	model_print("}\n");
+}
+
+/* Print every function's predicate tree (old entry-action dump is disabled) */
+void ModelHistory::print_func_node()
{
	/* function id starts with 1 */
-	for (uint32_t i = 1;i < func_nodes.size();i++) {
+	for (uint32_t i = 1; i < func_nodes.size(); i++) {
		FuncNode * func_node = func_nodes[i];
+		func_node->print_predicate_tree();
+/*
		func_inst_list_mt * entry_insts = func_node->get_entry_insts();
		model_print("function %s has entry actions\n", func_node->get_func_name());
		FuncInst *inst = it->getVal();
		model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
	}
+	 */
+	}
+}
-/*
- func_inst_list_mt * inst_list = funcNode->get_inst_list();
-
- model_print("function %s has following actions\n", funcNode->get_func_name());
- func_inst_list_mt::iterator it;
- for (it = inst_list->begin(); it != inst_list->end(); it++) {
- FuncInst *inst = *it;
- model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
- }
- */
+/* Debug dump: for every thread, print who it waits for and who waits for it */
+void ModelHistory::print_waiting_threads()
+{
+	ModelExecution * execution = model->get_execution();
+	for (unsigned int i = 0; i < execution->get_num_threads();i++) {
+		thread_id_t tid = int_to_id(i);
+		WaitObj * wait_obj = getWaitObj(tid);
+		wait_obj->print_waiting_for();
+	}
+
+	for (unsigned int i = 0; i < execution->get_num_threads();i++) {
+		thread_id_t tid = int_to_id(i);
+		WaitObj * wait_obj = getWaitObj(tid);
+		wait_obj->print_waited_by();
	}
}
#ifndef __HISTORY_H__
#define __HISTORY_H__
-#include "stl-model.h"
#include "common.h"
+#include "classlist.h"
#include "hashtable.h"
-#include "hashset.h"
#include "threads-model.h"
-typedef HashSet<uint64_t, uint64_t, 0, model_malloc, model_calloc, model_free> write_set_t;
-
class ModelHistory {
public:
ModelHistory();
ModelVector<FuncNode *> * getFuncNodes() { return &func_nodes; }
FuncNode * get_func_node(uint32_t func_id);
- uint64_t query_last_read(void * location, thread_id_t tid);
+ FuncNode * get_curr_func_node(thread_id_t tid);
+
+ void update_write_history(void * location, uint64_t write_val);
+ HashTable<void *, value_set_t *, uintptr_t, 0> * getWriteHistory() { return write_history; }
+ void update_loc_rd_func_nodes_map(void * location, FuncNode * node);
+ void update_loc_wr_func_nodes_map(void * location, FuncNode * node);
+ SnapVector<FuncNode *> * getRdFuncNodes(void * location);
+ SnapVector<FuncNode *> * getWrFuncNodes(void * location);
+
+ void add_waiting_write(ConcretePredicate * concrete);
+ void remove_waiting_write(thread_id_t tid);
+ void check_waiting_write(ModelAction * write_act);
+ SnapVector<ConcretePredicate *> * getThrdWaitingWrite() { return thrd_waiting_write; }
- void add_to_write_history(void * location, uint64_t write_val);
+ WaitObj * getWaitObj(thread_id_t tid);
+ void add_waiting_thread(thread_id_t self_id, thread_id_t waiting_for_id, FuncNode * target_node, int dist);
+ void remove_waiting_thread(thread_id_t tid);
+ void stop_waiting_for_node(thread_id_t self_id, thread_id_t waiting_for_id, FuncNode * target_node);
- void print();
+ SnapVector<inst_act_map_t *> * getThrdInstActMap(uint32_t func_id);
+
+ void set_new_exec_flag();
+ void dump_func_node_graph();
+ void print_func_node();
+ void print_waiting_threads();
MEMALLOC
private:
uint32_t func_counter;
- /* map function names to integer ids */
+ /* Map function names to integer ids */
HashTable<const char *, uint32_t, uintptr_t, 4, model_malloc, model_calloc, model_free> func_map;
- /* map integer ids to function names */
+
+ /* Map integer ids to function names */
ModelVector<const char *> func_map_rev;
ModelVector<FuncNode *> func_nodes;
- HashTable<void *, write_set_t *, uintptr_t, 4, model_malloc, model_calloc, model_free> write_history;
+
+ /* Map a location to a set of values that have been written to it */
+ HashTable<void *, value_set_t *, uintptr_t, 0> * write_history;
+
+ /* Map a location to FuncNodes that may read from it */
+ HashTable<void *, SnapVector<FuncNode *> *, uintptr_t, 0> * loc_rd_func_nodes_map;
+
+ /* Map a location to FuncNodes that may write to it */
+ HashTable<void *, SnapVector<FuncNode *> *, uintptr_t, 0> * loc_wr_func_nodes_map;
+
+ HashTable<void *, SnapVector<ConcretePredicate *> *, uintptr_t, 0> * loc_waiting_writes_map;
+ /* The write values each paused thread is waiting for */
+ SnapVector<ConcretePredicate *> * thrd_waiting_write;
+ SnapVector<WaitObj *> * thrd_wait_obj;
+
+ /* A run-time map from FuncInst to ModelAction per thread, per FuncNode.
+ * Manipulated by FuncNode, and needed by NewFuzzer */
+ HashTable<uint32_t, SnapVector<inst_act_map_t *> *, int, 0> * func_inst_act_maps;
+
+ bool skip_action(ModelAction * act, SnapList<ModelAction *> * curr_act_list);
+ void monitor_waiting_thread(uint32_t func_id, thread_id_t tid);
+ void monitor_waiting_thread_counter(thread_id_t tid);
};
#endif /* __HISTORY_H__ */
bool try_lock();
void unlock();
struct mutex_state * get_state() {return &state;}
- void initialize() { state.init = 1; } // WL
- bool is_initialized() { return state.init == 1; }
private:
struct mutex_state state;
int user_main(int, char**);
}
-void check();
#endif
--- /dev/null
+/**
+ * @file predicatetypes.h
+ * @brief Define common predicate expression types
+ */
+
+#ifndef __PREDICATE_TYPES_H__
+#define __PREDICATE_TYPES_H__
+
+typedef enum predicate_token {
+ NOPREDICATE, EQUALITY, NULLITY
+} token_t;
+
+typedef enum predicate_sleep_result {
+ SLEEP_FAIL_TYPE1, SLEEP_FAIL_TYPE2, SLEEP_FAIL_TYPE3,
+ SLEEP_SUCCESS
+} sleep_result_t;
+
+/* If token is EQUALITY, then the predicate asserts whether
+ * this load should read the same value as the last value
+ * read at memory location specified in predicate_expr.
+ */
+struct pred_expr {
+ pred_expr(token_t token, FuncInst * inst, bool value) :
+ token(token),
+ func_inst(inst),
+ value(value)
+ {}
+
+ token_t token;
+ FuncInst * func_inst;
+ bool value;
+
+ MEMALLOC
+};
+
+/* Used by FuncNode to generate Predicates */
+struct half_pred_expr {
+ half_pred_expr(token_t token, FuncInst * inst) :
+ token(token),
+ func_inst(inst)
+ {}
+
+ token_t token;
+ FuncInst * func_inst;
+
+ SNAPSHOTALLOC
+};
+
+struct concrete_pred_expr {
+ concrete_pred_expr(token_t token, uint64_t value, bool equality) :
+ token(token),
+ value(value),
+ equality(equality)
+ {}
+
+ token_t token;
+ uint64_t value;
+ bool equality;
+
+ SNAPSHOTALLOC
+};
+
+#endif /* __PREDICATE_TYPES_H__ */
+
return *((uint64_t *)addr);
}
-// helper functions used by CdsPass
-// The CdsPass implementation does not replace normal load/stores with cds load/stores,
-// but inserts cds load/stores to check dataraces. Thus, the cds load/stores do not
-// return anything.
+/**
+ * Helper functions used by CDSPass
+ * The CDSPass implementation does not replace normal load/stores with cds load/stores,
+ * but inserts cds load/stores to check dataraces. Thus, the cds load/stores do not
+ * return anything.
+ */
void cds_store8(void *addr)
{
params->uninitvalue = 0;
params->maxexecutions = 10;
params->nofork = false;
+ params->threadsnocleanup = false;
}
static void print_usage(const char *program_name, struct model_params *params)
" Default: %u\n"
" -o help for a list of options\n"
"-n No fork\n"
+#ifdef TLS
+ "-d Don't allow threads to cleanup\n"
+#endif
" -- Program arguments follow.\n\n",
program_name,
params->verbose,
static void parse_options(struct model_params *params, int argc, char **argv)
{
- const char *shortopts = "hnt:o:u:x:v::";
+ const char *shortopts = "hdnt:o:u:x:v::";
const struct option longopts[] = {
{"help", no_argument, NULL, 'h'},
{"verbose", optional_argument, NULL, 'v'},
case 'h':
print_usage(argv[0], params);
break;
+ case 'd':
+ params->threadsnocleanup = true;
+ break;
case 'n':
params->nofork = true;
break;
"Distributed under the GPLv2\n"
"Written by Weiyu Luo, Brian Norris, and Brian Demsky\n\n");
- /* Configure output redirection for the model-checker */
- redirect_output();
//Initialize snapshotting library and model checker object
if (!model) {
model->startChecker();
}
+ /* Configure output redirection for the model-checker */
+ redirect_output();
+
register_plugins();
//Parse command line options
params(),
restart_flag(false),
scheduler(new Scheduler()),
- execution(new ModelExecution(this, scheduler)),
history(new ModelHistory()),
+ execution(new ModelExecution(this, scheduler)),
execution_number(1),
trace_analyses(),
inspect_plugin(NULL)
{
memset(&stats,0,sizeof(struct execution_stats));
- init_thread = new Thread(execution->get_next_id(), (thrd_t *) model_malloc(sizeof(thrd_t)), &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
+ init_thread = new Thread(execution->get_next_id(), (thrd_t *) model_malloc(sizeof(thrd_t)), &user_main_wrapper, NULL, NULL);
#ifdef TLS
init_thread->setTLS((char *)get_tls_addr());
#endif
// test code
execution_number ++;
reset_to_initial_state();
+ history->set_new_exec_flag();
return false;
}
/* Infeasible -> don't take any more steps */
if (execution->is_infeasible())
return true;
- else if (execution->isfeasibleprefix() && execution->have_fatal_bug_reports()) {
+ else if (execution->isfeasibleprefix() && execution->have_bug_reports()) {
execution->set_assert();
return true;
} else if (execution->isFinished()) {
main_thread_startup();
}
-static bool is_nonsc_write(const ModelAction *act) {
- if (act->get_type() == ATOMIC_WRITE) {
- std::memory_order order = act->get_mo();
- switch(order) {
- case std::memory_order_relaxed:
- case std::memory_order_release:
- return true;
- default:
- return false;
- }
- }
- return false;
-}
-
/** @brief Run ModelChecker for the user program */
void ModelChecker::run()
{
for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
Thread *thr = get_thread(tid);
- if (!thr->is_model_thread() && !thr->is_complete() && (!thr->get_pending())) {
- switch_from_master(thr); // L: context swapped, and action type of thr changed.
+ if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
+ switch_from_master(thr);
if (thr->is_waiting_on(thr))
assert_bug("Deadlock detected (thread %u)", i);
}
t = get_next_thread();
if (!t || t->is_model_thread())
break;
+ if (t->just_woken_up()) {
+ t->set_wakeup_state(false);
+ t->set_pending(NULL);
+ t = NULL;
+ continue; // Allow this thread to stash the next pending action
+ }
/* Consume the next action for a Thread */
ModelAction *curr = t->get_pending();
/* Have the trace analyses dump their output. */
for (unsigned int i = 0;i < trace_analyses.size();i++)
trace_analyses[i]->finish();
+
+ /* unlink tmp file created by last child process */
+ char filename[256];
+ snprintf_(filename, sizeof(filename), "C11FuzzerTmp%d", getpid());
+ unlink(filename);
}
void startMainThread();
void startChecker();
Thread * getInitThread() {return init_thread;}
+ Scheduler * getScheduler() {return scheduler;}
MEMALLOC
private:
/** Flag indicates whether to restart the model checker. */
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
+ ModelHistory * history;
ModelExecution *execution;
Thread * init_thread;
- ModelHistory *history;
int execution_number;
#include "common.h"
#include "threads-model.h"
#include "model.h"
+#include "datarace.h"
#define REQUESTS_BEFORE_ALLOC 1024
*/
void *malloc(size_t size)
{
+ void *tmp;
if (user_snapshot_space) {
/* Only perform user allocations from user context */
ASSERT(!model || thread_current());
- return user_malloc(size);
+ tmp = user_malloc(size);
} else
- return HandleEarlyAllocationRequest(size);
+ tmp = HandleEarlyAllocationRequest(size);
+ recordCalloc(tmp, size);
+ return tmp;
}
/** @brief Snapshotting free implementation for user programs */
void *realloc(void *ptr, size_t size)
{
void *tmp = mspace_realloc(user_snapshot_space, ptr, size);
+ recordCalloc(tmp, size);
ASSERT(tmp);
return tmp;
}
if (user_snapshot_space) {
void *tmp = mspace_calloc(user_snapshot_space, num, size);
ASSERT(tmp);
+ recordAlloc(tmp, num*size);
return tmp;
} else {
void *tmp = HandleEarlyAllocationRequest(size * num);
memset(tmp, 0, size * num);
+ recordAlloc(tmp, num*size);
return tmp;
}
}
--- /dev/null
+#include "newfuzzer.h"
+#include "threads-model.h"
+#include "action.h"
+#include "history.h"
+#include "funcnode.h"
+#include "funcinst.h"
+#include "concretepredicate.h"
+#include "waitobj.h"
+
+#include "model.h"
+#include "schedule.h"
+#include "execution.h"
+
+NewFuzzer::NewFuzzer() :
+ thrd_last_read_act(),
+ thrd_last_func_inst(),
+ thrd_selected_child_branch(),
+ thrd_pruned_writes(),
+ paused_thread_list(),
+ paused_thread_table(128),
+ failed_predicates(32),
+ dist_info_vec()
+{}
+
+/**
+ * @brief Register the ModelHistory and ModelExecution engine
+ */
+void NewFuzzer::register_engine(ModelHistory * history, ModelExecution *execution)
+{
+ this->history = history;
+ this->execution = execution;
+}
+
+int NewFuzzer::selectWrite(ModelAction *read, SnapVector<ModelAction *> * rf_set)
+{
+ return random() % rf_set->size();
+
+ thread_id_t tid = read->get_tid();
+ int thread_id = id_to_int(tid);
+
+ if (thrd_last_read_act.size() <= (uint) thread_id) {
+ thrd_last_read_act.resize(thread_id + 1);
+ thrd_last_func_inst.resize(thread_id + 1);
+ }
+
+ // A new read action is encountered, select a random child branch of current predicate
+ if (read != thrd_last_read_act[thread_id]) {
+ FuncNode * func_node = history->get_curr_func_node(tid);
+ Predicate * curr_pred = func_node->get_predicate_tree_position(tid);
+ FuncInst * read_inst = func_node->get_inst(read);
+ inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
+
+ check_store_visibility(curr_pred, read_inst, inst_act_map, rf_set);
+ Predicate * selected_branch = selectBranch(tid, curr_pred, read_inst);
+ prune_writes(tid, selected_branch, rf_set, inst_act_map);
+
+ if (!failed_predicates.isEmpty())
+ failed_predicates.reset();
+
+ thrd_last_read_act[thread_id] = read;
+ thrd_last_func_inst[thread_id] = read_inst;
+ }
+
+ // No write satisfies the selected predicate, so pause this thread.
+ while ( rf_set->size() == 0 ) {
+ Predicate * selected_branch = get_selected_child_branch(tid);
+
+ //model_print("the %d read action of thread %d at %p is unsuccessful\n", read->get_seq_number(), read_thread->get_id(), read->get_location());
+
+ SnapVector<ModelAction *> * pruned_writes = thrd_pruned_writes[thread_id];
+ for (uint i = 0; i < pruned_writes->size(); i++) {
+ rf_set->push_back( (*pruned_writes)[i] );
+ }
+
+ // Reselect a predicate and prune writes
+ Predicate * curr_pred = selected_branch->get_parent();
+ FuncInst * read_inst = thrd_last_func_inst[thread_id];
+ selected_branch = selectBranch(tid, curr_pred, read_inst);
+
+ FuncNode * func_node = history->get_curr_func_node(tid);
+ inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
+ prune_writes(tid, selected_branch, rf_set, inst_act_map);
+
+ ASSERT(selected_branch);
+ }
+
+ ASSERT(rf_set->size() != 0);
+ int random_index = random() % rf_set->size();
+
+ return random_index;
+}
+
+void NewFuzzer::check_store_visibility(Predicate * curr_pred, FuncInst * read_inst,
+ inst_act_map_t * inst_act_map, SnapVector<ModelAction *> * rf_set)
+{
+ ASSERT(!rf_set->empty());
+ if (curr_pred == NULL || read_inst == NULL)
+ return;
+
+ ModelVector<Predicate *> * children = curr_pred->get_children();
+
+ /* Iterate over all predicate children */
+ for (uint i = 0; i < children->size(); i++) {
+ Predicate * branch = (*children)[i];
+
+ /* The children predicates may have different FuncInsts */
+ if (branch->get_func_inst() == read_inst) {
+ PredExprSet * pred_expressions = branch->get_pred_expressions();
+
+ /* Do not check unset predicates */
+ if (pred_expressions->isEmpty())
+ continue;
+
+ branch->incr_total_checking_count();
+
+ /* Iterate over all write actions */
+ for (uint j = 0; j < rf_set->size(); j++) {
+ ModelAction * write_act = (*rf_set)[j];
+ uint64_t write_val = write_act->get_write_value();
+ bool dummy = true;
+ bool satisfy_predicate = check_predicate_expressions(pred_expressions, inst_act_map, write_val, &dummy);
+
+ /* If one write value satisfies the predicate, go to check the next predicate */
+ if (satisfy_predicate) {
+ branch->incr_store_visible_count();
+ break;
+ }
+ }
+ }
+
+ }
+}
+
+
+/* Select a random branch from the children of curr_pred
+ * @return The selected branch
+ */
+Predicate * NewFuzzer::selectBranch(thread_id_t tid, Predicate * curr_pred, FuncInst * read_inst)
+{
+ int thread_id = id_to_int(tid);
+ if ( thrd_selected_child_branch.size() <= (uint) thread_id)
+ thrd_selected_child_branch.resize(thread_id + 1);
+
+ if (curr_pred == NULL || read_inst == NULL) {
+ thrd_selected_child_branch[thread_id] = NULL;
+ return NULL;
+ }
+
+ ModelVector<Predicate *> * children = curr_pred->get_children();
+ SnapVector<Predicate *> branches;
+
+ for (uint i = 0; i < children->size(); i++) {
+ Predicate * child = (*children)[i];
+ if (child->get_func_inst() == read_inst && !failed_predicates.contains(child)) {
+ branches.push_back(child);
+ }
+ }
+
+ // predicate children have not been generated
+ if (branches.size() == 0) {
+ thrd_selected_child_branch[thread_id] = NULL;
+ return NULL;
+ }
+
+ int index = choose_index(&branches, 0);
+ Predicate * random_branch = branches[ index ];
+ thrd_selected_child_branch[thread_id] = random_branch;
+
+ // Update predicate tree position
+ FuncNode * func_node = history->get_curr_func_node(tid);
+ func_node->set_predicate_tree_position(tid, random_branch);
+
+ return random_branch;
+}
+
+/**
+ * @brief Select a branch from the given predicate branches based
+ * on their exploration counts.
+ *
+ * Let b_1, ..., b_n be branches with exploration counts c_1, ..., c_n
+ * M := max(c_1, ..., c_n) + 1
+ * Factor f_i := M / (c_i + 1)
+ * The probability p_i that branch b_i is selected:
+ * p_i := f_i / (f_1 + ... + f_n)
+ * = \fraction{ 1/(c_i + 1) }{ 1/(c_1 + 1) + ... + 1/(c_n + 1) }
+ *
+ * Note: (1) c_i + 1 is used because counts may be 0.
+ * (2) The numerator of f_i is chosen to reduce the effect of underflow
+ *
+ * @param numerator is M defined above
+ */
+int NewFuzzer::choose_index(SnapVector<Predicate *> * branches, uint32_t numerator)
+{
+ return random() % branches->size();
+/*--
+ if (branches->size() == 1)
+ return 0;
+
+ double total_factor = 0;
+ SnapVector<double> factors = SnapVector<double>( branches->size() + 1 );
+ for (uint i = 0; i < branches->size(); i++) {
+ Predicate * branch = (*branches)[i];
+ double factor = (double) numerator / (branch->get_expl_count() + 5 * branch->get_fail_count() + 1);
+ total_factor += factor;
+ factors.push_back(factor);
+ }
+
+ double prob = (double) random() / RAND_MAX;
+ double prob_sum = 0;
+ int index = 0;
+
+ for (uint i = 0; i < factors.size(); i++) {
+ index = i;
+ prob_sum += (double) (factors[i] / total_factor);
+ if (prob_sum > prob) {
+ break;
+ }
+ }
+
+ return index;
+*/
+}
+
+Predicate * NewFuzzer::get_selected_child_branch(thread_id_t tid)
+{
+ int thread_id = id_to_int(tid);
+ if (thrd_selected_child_branch.size() <= (uint) thread_id)
+ return NULL;
+
+ return thrd_selected_child_branch[thread_id];
+}
+
+/* Remove writes from the rf_set that do not satisfy the selected predicate,
+ * and store them in thrd_pruned_writes
+ *
+ * @return true if rf_set is pruned
+ */
+bool NewFuzzer::prune_writes(thread_id_t tid, Predicate * pred,
+ SnapVector<ModelAction *> * rf_set, inst_act_map_t * inst_act_map)
+{
+ if (pred == NULL)
+ return false;
+
+ PredExprSet * pred_expressions = pred->get_pred_expressions();
+ if (pred_expressions->getSize() == 0) // unset predicates
+ return false;
+
+ int thread_id = id_to_int(tid);
+ uint old_size = thrd_pruned_writes.size();
+ if (thrd_pruned_writes.size() <= (uint) thread_id) {
+ uint new_size = thread_id + 1;
+ thrd_pruned_writes.resize(new_size);
+ for (uint i = old_size; i < new_size; i++)
+ thrd_pruned_writes[i] = new SnapVector<ModelAction *>();
+ }
+ SnapVector<ModelAction *> * pruned_writes = thrd_pruned_writes[thread_id];
+ pruned_writes->clear(); // clear the old pruned_writes set
+
+ bool pruned = false;
+ uint index = 0;
+
+ while ( index < rf_set->size() ) {
+ ModelAction * write_act = (*rf_set)[index];
+ uint64_t write_val = write_act->get_write_value();
+ bool no_predicate = false;
+ bool satisfy_predicate = check_predicate_expressions(pred_expressions, inst_act_map, write_val, &no_predicate);
+
+ if (no_predicate)
+ return false;
+
+ if (!satisfy_predicate) {
+ ASSERT(rf_set != NULL);
+ (*rf_set)[index] = rf_set->back();
+ rf_set->pop_back();
+ pruned_writes->push_back(write_act);
+ pruned = true;
+ } else
+ index++;
+ }
+
+ return pruned;
+}
+
+/** @brief Put a thread to sleep because no writes in rf_set satisfies the selected predicate.
+ *
+ * @param thread A thread whose last action is a read
+ */
+void NewFuzzer::conditional_sleep(Thread * thread)
+{
+ int index = paused_thread_list.size();
+
+ model->getScheduler()->add_sleep(thread);
+ paused_thread_list.push_back(thread);
+ paused_thread_table.put(thread, index); // Update table
+
+ /* Add the waiting condition to ModelHistory */
+ ModelAction * read = thread->get_pending();
+ thread_id_t tid = thread->get_id();
+ FuncNode * func_node = history->get_curr_func_node(tid);
+ inst_act_map_t * inst_act_map = func_node->get_inst_act_map(tid);
+
+ Predicate * selected_branch = get_selected_child_branch(tid);
+ ConcretePredicate * concrete = selected_branch->evaluate(inst_act_map, tid);
+ concrete->set_location(read->get_location());
+
+ history->add_waiting_write(concrete);
+ /* history->add_waiting_thread is already called in find_threads */
+}
+
+/**
+ * Decides whether a thread should conditionally sleep based on
+ * the sleep score of the chosen predicate.
+ *
+ * sleep_score = 0: never sleeps
+ * sleep_score = 100: always sleeps
+ **/
+bool NewFuzzer::should_conditional_sleep(Predicate * predicate)
+{
+ return false;
+ /*
+ int sleep_score = predicate->get_sleep_score();
+ int random_num = random() % 100;
+
+ // should sleep if random_num falls within [0, sleep_score)
+ if (random_num < sleep_score)
+ return true;
+
+ return false;
+ */
+}
+
+bool NewFuzzer::has_paused_threads()
+{
+ return paused_thread_list.size() != 0;
+}
+
+Thread * NewFuzzer::selectThread(int * threadlist, int numthreads)
+{
+ if (numthreads == 0 && has_paused_threads()) {
+ wake_up_paused_threads(threadlist, &numthreads);
+ //model_print("list size: %d, active t id: %d\n", numthreads, threadlist[0]);
+ }
+
+ int random_index = random() % numthreads;
+ int thread = threadlist[random_index];
+ thread_id_t curr_tid = int_to_id(thread);
+ return execution->get_thread(curr_tid);
+}
+
+/* Force waking up one of threads paused by Fuzzer, because otherwise
+ * the Fuzzer is not making progress
+ */
+void NewFuzzer::wake_up_paused_threads(int * threadlist, int * numthreads)
+{
+ int random_index = random() % paused_thread_list.size();
+ Thread * thread = paused_thread_list[random_index];
+ model->getScheduler()->remove_sleep(thread);
+
+ Thread * last_thread = paused_thread_list.back();
+ paused_thread_list[random_index] = last_thread;
+ paused_thread_list.pop_back();
+ paused_thread_table.put(last_thread, random_index); // Update table
+ paused_thread_table.remove(thread);
+
+ thread_id_t tid = thread->get_id();
+ history->remove_waiting_write(tid);
+ history->remove_waiting_thread(tid);
+
+ threadlist[*numthreads] = tid;
+ (*numthreads)++;
+
+/*--
+ Predicate * selected_branch = get_selected_child_branch(tid);
+ update_predicate_score(selected_branch, SLEEP_FAIL_TYPE3);
+*/
+
+ model_print("thread %d is woken up\n", tid);
+}
+
+/* Wake up conditional sleeping threads if the desired write is available */
+void NewFuzzer::notify_paused_thread(Thread * thread)
+{
+ ASSERT(paused_thread_table.contains(thread));
+
+ int index = paused_thread_table.get(thread);
+ model->getScheduler()->remove_sleep(thread);
+
+ Thread * last_thread = paused_thread_list.back();
+ paused_thread_list[index] = last_thread;
+ paused_thread_list.pop_back();
+ paused_thread_table.put(last_thread, index); // Update table
+ paused_thread_table.remove(thread);
+
+ thread_id_t tid = thread->get_id();
+ history->remove_waiting_write(tid);
+ history->remove_waiting_thread(tid);
+
+/*--
+ Predicate * selected_branch = get_selected_child_branch(tid);
+ update_predicate_score(selected_branch, SLEEP_SUCCESS);
+*/
+
+ model_print("** thread %d is woken up\n", tid);
+}
+
+/* Find threads that may write values that the pending read action is waiting for.
+ * Side effect: waiting thread related info are stored in dist_info_vec
+ *
+ * @return True if any thread is found
+ */
+bool NewFuzzer::find_threads(ModelAction * pending_read)
+{
+ ASSERT(pending_read->is_read());
+
+ void * location = pending_read->get_location();
+ thread_id_t self_id = pending_read->get_tid();
+ bool finds_waiting_for = false;
+
+ SnapVector<FuncNode *> * func_node_list = history->getWrFuncNodes(location);
+ for (uint i = 0; i < func_node_list->size(); i++) {
+ FuncNode * target_node = (*func_node_list)[i];
+ for (uint i = 1; i < execution->get_num_threads(); i++) {
+ thread_id_t tid = int_to_id(i);
+ if (tid == self_id)
+ continue;
+
+ FuncNode * node = history->get_curr_func_node(tid);
+ /* It is possible that thread tid is not in any FuncNode */
+ if (node == NULL)
+ continue;
+
+ int distance = node->compute_distance(target_node);
+ if (distance != -1) {
+ finds_waiting_for = true;
+ //model_print("thread: %d; distance from node %d to node %d: %d\n", tid, node->get_func_id(), target_node->get_func_id(), distance);
+
+ dist_info_vec.push_back(node_dist_info(tid, target_node, distance));
+ }
+ }
+ }
+
+ return finds_waiting_for;
+}
+
+/* Update predicate counts and scores (asynchronous) when the read value is not available
+ *
+ * @param type
+ * type 1: find_threads returns false
+ * type 2: find_threads returns true, but the fuzzer decides that the thread shall not sleep based on sleep score
+ * type 3: threads are put to sleep but woken up before the waited value appears
+ * type 4: threads are put to sleep and the waited value appears (success)
+ */
+
+/*--
+void NewFuzzer::update_predicate_score(Predicate * predicate, sleep_result_t type)
+{
+ switch (type) {
+ case SLEEP_FAIL_TYPE1:
+ predicate->incr_fail_count();
+
+ // Do not choose this predicate when reselecting a new branch
+ failed_predicates.put(predicate, true);
+ break;
+ case SLEEP_FAIL_TYPE2:
+ predicate->incr_fail_count();
+ predicate->incr_sleep_score(1);
+ failed_predicates.put(predicate, true);
+ break;
+ case SLEEP_FAIL_TYPE3:
+ predicate->incr_fail_count();
+ predicate->decr_sleep_score(10);
+ break;
+ case SLEEP_SUCCESS:
+ predicate->incr_sleep_score(10);
+ break;
+ default:
+ model_print("unknown predicate result type.\n");
+ break;
+ }
+}
+*/
+
+bool NewFuzzer::check_predicate_expressions(PredExprSet * pred_expressions,
+ inst_act_map_t * inst_act_map, uint64_t write_val, bool * no_predicate)
+{
+ bool satisfy_predicate = true;
+
+ PredExprSetIter * pred_expr_it = pred_expressions->iterator();
+ while (pred_expr_it->hasNext()) {
+ struct pred_expr * expression = pred_expr_it->next();
+ bool equality;
+
+ switch (expression->token) {
+ case NOPREDICATE:
+ *no_predicate = true;
+ break;
+ case EQUALITY:
+ FuncInst * to_be_compared;
+ ModelAction * last_act;
+ uint64_t last_read;
+
+ to_be_compared = expression->func_inst;
+ last_act = inst_act_map->get(to_be_compared);
+ last_read = last_act->get_reads_from_value();
+
+ equality = (write_val == last_read);
+ if (equality != expression->value)
+ satisfy_predicate = false;
+ break;
+ case NULLITY:
+ equality = ((void*)write_val == NULL);
+ if (equality != expression->value)
+ satisfy_predicate = false;
+ break;
+ default:
+ model_print("unknown predicate token\n");
+ break;
+ }
+
+ if (!satisfy_predicate)
+ break;
+ }
+
+ return satisfy_predicate;
+}
+
+bool NewFuzzer::shouldWait(const ModelAction * act)
+{
+ return random() & 1;
+}
--- /dev/null
+#ifndef __NEWFUZZER_H__
+#define __NEWFUZZER_H__
+
+#include "fuzzer.h"
+#include "classlist.h"
+#include "mymemory.h"
+#include "stl-model.h"
+#include "predicate.h"
+
+struct node_dist_info {
+ node_dist_info(thread_id_t tid, FuncNode * node, int distance) :
+ tid(tid),
+ target(node),
+ dist(distance)
+ {}
+
+ thread_id_t tid;
+ FuncNode * target;
+ int dist;
+
+ SNAPSHOTALLOC
+};
+
+class NewFuzzer : public Fuzzer {
+public:
+ NewFuzzer();
+ int selectWrite(ModelAction *read, SnapVector<ModelAction *>* rf_set);
+ bool has_paused_threads();
+ void notify_paused_thread(Thread * thread);
+
+ Thread * selectThread(int * threadlist, int numthreads);
+ Thread * selectNotify(action_list_t * waiters);
+ bool shouldSleep(const ModelAction * sleep);
+ bool shouldWake(const ModelAction * sleep);
+ bool shouldWait(const ModelAction * wait);
+
+ void register_engine(ModelHistory * history, ModelExecution * execution);
+
+ SNAPSHOTALLOC
+private:
+ ModelHistory * history;
+ ModelExecution * execution;
+
+ SnapVector<ModelAction *> thrd_last_read_act;
+ SnapVector<FuncInst *> thrd_last_func_inst;
+
+ SnapVector<Predicate *> thrd_selected_child_branch;
+ SnapVector< SnapVector<ModelAction *> *> thrd_pruned_writes;
+
+ void check_store_visibility(Predicate * curr_pred, FuncInst * read_inst, inst_act_map_t * inst_act_map, SnapVector<ModelAction *> * rf_set);
+ Predicate * selectBranch(thread_id_t tid, Predicate * curr_pred, FuncInst * read_inst);
+ Predicate * get_selected_child_branch(thread_id_t tid);
+ bool prune_writes(thread_id_t tid, Predicate * pred, SnapVector<ModelAction *> * rf_set, inst_act_map_t * inst_act_map);
+ int choose_index(SnapVector<Predicate *> * branches, uint32_t numerator);
+
+ /* The set of Threads put to sleep by NewFuzzer because no writes in rf_set satisfies the selected predicate. Only used by selectWrite.
+ */
+ SnapVector<Thread *> paused_thread_list; //-- (not in use)
+ HashTable<Thread *, int, uintptr_t, 0> paused_thread_table; //--
+ HashTable<Predicate *, bool, uintptr_t, 0> failed_predicates;
+
+ SnapVector<struct node_dist_info> dist_info_vec; //--
+
+ void conditional_sleep(Thread * thread); //--
+ bool should_conditional_sleep(Predicate * predicate);
+ void wake_up_paused_threads(int * threadlist, int * numthreads); //--
+
+ bool find_threads(ModelAction * pending_read); //--
+ /*-- void update_predicate_score(Predicate * predicate, sleep_result_t type); */
+
+ bool check_predicate_expressions(PredExprSet * pred_expressions, inst_act_map_t * inst_act_map, uint64_t write_val, bool * no_predicate);
+};
+
+#endif /* end of __NEWFUZZER_H__ */
unsigned int uninitvalue;
int maxexecutions;
bool nofork;
+ bool threadsnocleanup;
/** @brief Verbosity (0 = quiet; 1 = noisy; 2 = noisier) */
int verbose;
--- /dev/null
+#include "common.h"
+#include <unistd.h>
+#include "model.h"
+#include "snapshot-interface.h"
+#include <dlfcn.h>
+#include <errno.h>
+
+static int (*pipe_init_p)(int filep[2]) = NULL;
+
+int pipe(int fildes[2]) {
+ if (!model) {
+ snapshot_system_init(10000, 1024, 1024, 40000);
+ model = new ModelChecker();
+ model->startChecker();
+ }
+ if (!pipe_init_p) {
+ pipe_init_p = (int (*)(int fildes[2])) dlsym(RTLD_NEXT, "pipe");
+ char *error = dlerror();
+ if (error != NULL) {
+ fputs(error, stderr);
+ exit(EXIT_FAILURE);
+ }
+ }
+ return pipe_init_p(fildes);
+}
+#include "funcinst.h"
#include "predicate.h"
+#include "concretepredicate.h"
-inline bool operator==(const predicate_expr& expr_A, const predicate_expr& expr_B)
+/**
+ * @brief Construct a predicate node tied to a FuncInst
+ * @param func_inst The function instruction this predicate is attached to
+ * @param is_entry True if this is the entry node of a predicate tree
+ * @param is_exit True if this is the exit node of a predicate tree
+ */
+Predicate::Predicate(FuncInst * func_inst, bool is_entry, bool is_exit) :
+	func_inst(func_inst),
+	entry_predicate(is_entry),
+	exit_predicate(is_exit),
+	does_write(false),
+	exploration_count(0),
+	store_visible_count(0),
+	total_checking_count(0),
+	pred_expressions(16),
+	children(),
+	parent(NULL),
+	exit(NULL),
+	backedges(16)
+{}
+
+/** @brief Destructor: releases owned sets; parent and func_inst are owned elsewhere */
+Predicate::~Predicate()
{
-	if (expr_A.token != expr_B.token)
-		return false;
+	// parent and func_inst should not be deleted
+	pred_expressions.reset();
+	backedges.reset();
+	children.clear();
+}
- if (expr_A.token == EQUALITY && expr_A.location != expr_B.location)
- return false;
+/**
+ * @brief Hash function for pred_expr sets
+ *
+ * Must be consistent with pred_expr_equal (the two are paired in the
+ * PredExprSet HashSet): expressions that compare equal must produce the
+ * same hash, so we hash the compared fields rather than the pointer
+ * identity of the allocation.
+ */
+unsigned int pred_expr_hash(struct pred_expr * expr)
+{
+	unsigned int hash = ((unsigned int) expr->token) * 31 + (unsigned int) expr->value;
+	/* func_inst only participates in equality for EQUALITY tokens */
+	if (expr->token == EQUALITY)
+		hash = hash * 37 + (unsigned int)((uintptr_t) expr->func_inst);
+	return hash;
+}
- if (expr_A.value != expr_B.value)
+/**
+ * @brief Structural equality for predicate expressions
+ *
+ * Tokens and boolean values must agree; the FuncInst operand is only
+ * significant for EQUALITY tokens.
+ */
+bool pred_expr_equal(struct pred_expr * p1, struct pred_expr * p2)
+{
+	if (p1->token != p2->token)
+		return false;
+	if (p1->token == EQUALITY && p1->func_inst != p2->func_inst)
+		return false;
+	if (p1->value != p2->value)
return false;
-
return true;
}
-void Predicate::add_predicate(predicate_expr predicate)
+/** @brief Allocate a new predicate expression (token, inst, value) and insert it */
+void Predicate::add_predicate_expr(token_t token, FuncInst * func_inst, bool value)
+{
+	struct pred_expr *ptr = new pred_expr(token, func_inst, value);
+	pred_expressions.add(ptr);
+}
+
+/** @brief Append a child node; note no duplicate check is performed */
+void Predicate::add_child(Predicate * child)
+{
+	/* check duplication? */
+	children.push_back(child);
+}
+
+/**
+ * @brief Deep-copy the predicate expressions of another node into this one
+ * @param other The Predicate whose expressions are copied; each pred_expr
+ *        is duplicated so the two nodes do not share storage
+ */
+void Predicate::copy_predicate_expr(Predicate * other)
+{
+	PredExprSet * other_pred_expressions = other->get_pred_expressions();
+	PredExprSetIter * it = other_pred_expressions->iterator();
+
+	while (it->hasNext()) {
+		struct pred_expr * ptr = it->next();
+		struct pred_expr * copy = new pred_expr(ptr->token, ptr->func_inst, ptr->value);
+		pred_expressions.add(copy);
+	}
+	/* iterator() returns a heap-allocated iterator; release it to avoid leaking */
+	delete it;
+}
+
+/**
+ * Evaluate predicate expressions against the given inst_act_map
+ * @param inst_act_map Maps each FuncInst to the last ModelAction it produced
+ * @param tid The thread on whose behalf the predicate is evaluated
+ * @return A newly allocated ConcretePredicate holding one concrete
+ *         (token, value, expected) triple per expression
+ */
+ConcretePredicate * Predicate::evaluate(inst_act_map_t * inst_act_map, thread_id_t tid)
+{
+	ConcretePredicate * concrete = new ConcretePredicate(tid);
+	PredExprSetIter * it = pred_expressions.iterator();
+
+	while (it->hasNext()) {
+		struct pred_expr * ptr = it->next();
+		uint64_t value = 0;
+
+		switch(ptr->token) {
+		case NOPREDICATE:
+			break;
+		case EQUALITY:
+			FuncInst * to_be_compared;
+			ModelAction * last_act;
+
+			/* NOTE(review): assumes inst_act_map has an entry for this
+			 * FuncInst; a missing entry would dereference NULL -- confirm
+			 * callers guarantee this */
+			to_be_compared = ptr->func_inst;
+			last_act = inst_act_map->get(to_be_compared);
+			value = last_act->get_reads_from_value();
+			break;
+		case NULLITY:
+			break;
+		default:
+			break;
+		}
+
+		concrete->add_expression(ptr->token, value, ptr->value);
+	}
+	/* iterator() returns a heap-allocated iterator; release it to avoid leaking */
+	delete it;
+
+	return concrete;
+}
+
+void Predicate::print_predicate()
{
- ModelList<predicate_expr>::iterator it;
- for (it = predicates.begin();it != predicates.end();it++) {
- if (predicate == *it)
- return;
+ model_print("\"%p\" [shape=box, label=\"\n", this);
+ if (entry_predicate) {
+ model_print("entry node\"];\n");
+ return;
+ }
+
+ if (exit_predicate) {
+ model_print("exit node\"];\n");
+ return;
}
- predicates.push_back(predicate);
+	func_inst->print();
+
+	if (pred_expressions.getSize() == 0)
+		model_print("predicate unset\n");
+
+	PredExprSetIter * it = pred_expressions.iterator();
+	while (it->hasNext()) {
+		struct pred_expr * expr = it->next();
+		FuncInst * inst = expr->func_inst;
+		switch (expr->token) {
+		case NOPREDICATE:
+			break;
+		case EQUALITY:
+			model_print("predicate token: equality, position: %s, value: %d\n", inst->get_position(), expr->value);
+			break;
+		case NULLITY:
+			model_print("predicate token: nullity, value: %d\n", expr->value);
+			break;
+		default:
+			model_print("unknown predicate token\n");
+			break;
+		}
+	}
+	/* iterator() returns a heap-allocated iterator; release it to avoid leaking */
+	delete it;
+
+	if (does_write) {
+		model_print("Does write\n");
+	}
+
+	/* Guard against 0/0 producing NaN before any visibility check has run */
+	double prob = (total_checking_count == 0) ? 0.0 :
+		(double) store_visible_count / total_checking_count;
+	model_print("Total checks: %d, visible count: %d; prob: %f\n", total_checking_count, store_visible_count, prob);
+	model_print("Exploration count: %d", exploration_count);
+	model_print("\"];\n");
+}
+
+/** @brief Recursively print this node and its subtree as graphviz edges */
+void Predicate::print_pred_subtree()
+{
+	print_predicate();
+	for (uint i = 0; i < children.size(); i++) {
+		Predicate * child = children[i];
+		child->print_pred_subtree();
+		model_print("\"%p\" -> \"%p\"\n", this, child);
+	}
+
+	PredSetIter * it = backedges.iterator();
+	while (it->hasNext()) {
+		Predicate * backedge = it->next();
+		model_print("\"%p\" -> \"%p\"[style=dashed, color=grey]\n", this, backedge);
+	}
+	/* iterator() returns a heap-allocated iterator; release it to avoid leaking */
+	delete it;
+
+	if (exit) {
+		model_print("\"%p\" -> \"%p\"[style=dashed, color=red]\n", this, exit);
+	}
}
-#include "funcinst.h"
-
-typedef enum predicate_token {
- EQUALITY, NULLITY
-} token_t;
-
-/* If token is EQUALITY, then the predicate asserts whether
- * this load should read the same value as the last value
- * read at memory location specified in predicate_expr.
- */
-struct predicate_expr {
- token_t token;
- void * location;
- bool value;
-};
+#ifndef __PREDICATE_H__
+#define __PREDICATE_H__
+
+#include "hashset.h"
+#include "predicatetypes.h"
+#include "classlist.h"
+
+unsigned int pred_expr_hash (struct pred_expr *);
+bool pred_expr_equal(struct pred_expr *, struct pred_expr *);
+typedef HashSet<struct pred_expr *, uintptr_t, 0, model_malloc, model_calloc, model_free, pred_expr_hash, pred_expr_equal> PredExprSet;
+typedef HSIterator<struct pred_expr *, uintptr_t, 0, model_malloc, model_calloc, model_free, pred_expr_hash, pred_expr_equal> PredExprSetIter;
class Predicate {
public:
- Predicate();
+ Predicate(FuncInst * func_inst, bool is_entry = false, bool is_exit = false);
~Predicate();
FuncInst * get_func_inst() { return func_inst; }
- ModelList<predicate_expr> * get_predicates() { return &predicates; }
- void add_predicate(predicate_expr predicate);
+ PredExprSet * get_pred_expressions() { return &pred_expressions; }
+
+	/* Tree-building operations */
+	void add_predicate_expr(token_t token, FuncInst * func_inst, bool value);
+	void add_child(Predicate * child);
+	void set_parent(Predicate * parent_pred) { parent = parent_pred; }
+	void set_exit(Predicate * exit_pred) { exit = exit_pred; }
+	void add_backedge(Predicate * back_pred) { backedges.add(back_pred); }
+	void copy_predicate_expr(Predicate * other);
+
+	/* Tree-inspection accessors */
+	ModelVector<Predicate *> * get_children() { return &children; }
+	Predicate * get_parent() { return parent; }
+	Predicate * get_exit() { return exit; }
+	PredSet * get_backedges() { return &backedges; }
+
+ bool is_entry_predicate() { return entry_predicate; }
+ void set_entry_predicate() { entry_predicate = true; }
+
+ /* Whether func_inst does write or not */
+ bool is_write() { return does_write; }
+ void set_write(bool is_write) { does_write = is_write; }
+
+ ConcretePredicate * evaluate(inst_act_map_t * inst_act_map, thread_id_t tid);
+
+ uint32_t get_expl_count() { return exploration_count; }
+ uint32_t get_store_visible_count() { return store_visible_count; }
+ uint32_t get_total_checking_count() { return total_checking_count; }
+
+ void incr_expl_count() { exploration_count++; }
+ void incr_store_visible_count() { store_visible_count++; }
+ void incr_total_checking_count() { total_checking_count++; }
+
+ void print_predicate();
+ void print_pred_subtree();
MEMALLOC
private:
FuncInst * func_inst;
- /* may have multiple precicates */
- ModelList<predicate_expr> predicates;
+ bool entry_predicate;
+ bool exit_predicate;
+ bool does_write;
+
+ uint32_t exploration_count;
+ uint32_t store_visible_count;
+ uint32_t total_checking_count; /* The number of times the store visibility is checked */
+
+ /* May have multiple predicate expressions */
+ PredExprSet pred_expressions;
+ ModelVector<Predicate *> children;
+
+ /* Only a single parent may exist */
+ Predicate * parent;
+ Predicate * exit;
+
+ /* May have multiple back edges, e.g. nested loops */
+ PredSet backedges;
};
+
+#endif /* __PREDICATE_H__ */
#include "mutex.h"
#include <condition_variable>
-#include <assert.h>
/* global "model" object */
#include "model.h"
#include "execution.h"
+#include <errno.h>
int pthread_create(pthread_t *t, const pthread_attr_t * attr,
- pthread_start_t start_routine, void * arg) {
+ pthread_start_t start_routine, void * arg) {
if (!model) {
snapshot_system_init(10000, 1024, 1024, 40000);
model = new ModelChecker();
}
int pthread_join(pthread_t t, void **value_ptr) {
-// Thread *th = model->get_pthread(t);
ModelExecution *execution = model->get_execution();
Thread *th = execution->get_pthread(t);
}
int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
- cdsc::snapmutex *m = new cdsc::snapmutex();
-
if (!model) {
snapshot_system_init(10000, 1024, 1024, 40000);
model = new ModelChecker();
model->startChecker();
}
+ cdsc::snapmutex *m = new cdsc::snapmutex();
ModelExecution *execution = model->get_execution();
execution->getMutexMap()->put(p_mutex, m);
model->startChecker();
}
-
ModelExecution *execution = model->get_execution();
/* to protect the case where PTHREAD_MUTEX_INITIALIZER is used
if (m != NULL) {
m->lock();
} else {
- printf("ah\n");
+ return 1;
}
return 0;
m->unlock();
} else {
printf("try to unlock an untracked pthread_mutex\n");
+ return 1;
}
return 0;
}
int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
- const struct timespec *__restrict abstime) {
+ const struct timespec *__restrict abstime) {
// timedlock just gives the option of giving up the lock, so return and let the scheduler decide which thread goes next
-/*
- ModelExecution *execution = model->get_execution();
- if (!execution->mutex_map.contains(p_mutex)) {
- pthread_mutex_init(p_mutex, NULL);
- }
- cdsc::snapmutex *m = execution->mutex_map.get(p_mutex);
-
- if (m != NULL) {
- m->lock();
- } else {
- printf("something is wrong with pthread_mutex_timedlock\n");
- }
-
- printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation without no timeout\n");
- */
- return 0;
+	/* Lazily bring up the model checker on first use */
+	if (!model) {
+		snapshot_system_init(10000, 1024, 1024, 40000);
+		model = new ModelChecker();
+		model->startChecker();
+	}
+
+	ModelExecution *execution = model->get_execution();
+
+	/* to protect the case where PTHREAD_MUTEX_INITIALIZER is used
+	   instead of pthread_mutex_init, or where *p_mutex is not stored
+	   in the execution->mutex_map for some reason. */
+	if (!execution->getMutexMap()->contains(p_mutex)) {
+		pthread_mutex_init(p_mutex, NULL);
+	}
+
+	cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
+
+	/* NOTE(review): abstime is ignored -- this blocks like a plain
+	 * pthread_mutex_lock and never returns ETIMEDOUT; confirm this is
+	 * the intended model-checking semantics. */
+	if (m != NULL) {
+		m->lock();
+		return 0;
+	}
+
+	return 1;
}
pthread_t pthread_self() {
ModelExecution *execution = model->get_execution();
if ( !execution->getCondMap()->contains(p_cond) )
pthread_cond_init(p_cond, NULL);
+ if ( !execution->getMutexMap()->contains(p_mutex) )
+ pthread_mutex_init(p_mutex, NULL);
cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
}
int pthread_cond_timedwait(pthread_cond_t *p_cond,
- pthread_mutex_t *p_mutex, const struct timespec *abstime) {
-// implement cond_timedwait as a noop and let the scheduler decide which thread goes next
+ pthread_mutex_t *p_mutex, const struct timespec *abstime) {
ModelExecution *execution = model->get_execution();
if ( !execution->getCondMap()->contains(p_cond) )
cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
- model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
-// v->wait(*m);
-// printf("timed_wait called\n");
+ model->switch_to_master(new ModelAction(ATOMIC_TIMEDWAIT, std::memory_order_seq_cst, v, (uint64_t) m));
+ m->lock();
+
+ // model_print("Timed_wait is called\n");
return 0;
}
v->notify_all();
return 0;
}
+
+/** @brief Destroy a tracked condition variable and drop it from the map.
+ *  NOTE(review): unlike the other wrappers there is no `if (!model)`
+ *  guard here -- presumably the checker is already initialized by the
+ *  time a condvar is destroyed; confirm against callers. */
+int pthread_cond_destroy(pthread_cond_t *p_cond) {
+	ModelExecution *execution = model->get_execution();
+
+	if (execution->getCondMap()->contains(p_cond)) {
+		cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
+		delete v;
+		execution->getCondMap()->remove(p_cond);
+	}
+	return 0;
+}
/**
* @brief Check if execution is stuck with no enabled threads and some sleeping
* thread
- * @return True if no threads are enabled an some thread is in the sleep set;
+ * @return True if no threads are enabled and some thread is in the sleep set;
* false otherwise
*/
bool Scheduler::all_threads_sleeping() const
Thread * Scheduler::select_next_thread()
{
int avail_threads = 0;
- int thread_list[enabled_len];
- for (int i = 0;i< enabled_len;i++) {
+ int sleep_threads = 0;
+ int thread_list[enabled_len], sleep_list[enabled_len];
+ Thread * thread;
+
+ for (int i = 0; i < enabled_len; i++) {
if (enabled[i] == THREAD_ENABLED)
thread_list[avail_threads++] = i;
+ else if (enabled[i] == THREAD_SLEEP_SET)
+ sleep_list[sleep_threads++] = i;
}
- if (avail_threads == 0)
- return NULL;// No threads availablex
+	/* With no runnable threads (and none paused by the fuzzer), try to
+	 * wake a sleeping thread so the execution can make progress; the
+	 * fuzzer picks which one. */
+	if (avail_threads == 0 && !execution->getFuzzer()->has_paused_threads()) {
+		if (sleep_threads != 0) {
+			// No threads available, but some threads sleeping. Wake up one of them
+			thread = execution->getFuzzer()->selectThread(sleep_list, sleep_threads);
+			remove_sleep(thread);
+			thread->set_wakeup_state(true);
+		} else {
+			return NULL;	// No threads available and no threads sleeping.
+		}
+	} else {
+		// Some threads are available
+		thread = execution->getFuzzer()->selectThread(thread_list, avail_threads);
+	}
- Thread * thread = execution->getFuzzer()->selectThread(thread_list, avail_threads);
curr_thread_index = id_to_int(thread->get_id());
return thread;
}
#include <time.h>
#include <unistd.h>
+#include <sys/param.h>
#include "action.h"
#include "model.h"
-unsigned int __sleep (unsigned int seconds)
+extern "C" {
+int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
+}
+
+/**
+ * @brief Replacement for sleep(): performs the wait in bounded chunks
+ * via nanosleep() so a large `seconds` value cannot overflow time_t.
+ */
+unsigned int sleep(unsigned int seconds)
{
- model->switch_to_master(
- new ModelAction(NOOP, std::memory_order_seq_cst, NULL)
- );
+ /* https://code.woboq.org/userspace/glibc/sysdeps/posix/sleep.c.html */
+ const unsigned int max
+ = (unsigned int) (((unsigned long int) (~((time_t) 0))) >> 1);
+
+ struct timespec ts = { 0, 0 };
+ do {
+ if (sizeof (ts.tv_sec) <= sizeof (seconds)) {
+ /* Since SECONDS is unsigned assigning the value to .tv_sec can
+ overflow it. In this case we have to wait in steps. */
+ ts.tv_sec += MIN (seconds, max);
+ seconds -= (unsigned int) ts.tv_sec;
+ } else {
+ ts.tv_sec = (time_t) seconds;
+ seconds = 0;
+ }
+
+ nanosleep(&ts, &ts);
+ } while (seconds > 0);
+
return 0;
}
-unsigned int sleep(unsigned int seconds)
+/** @brief Replacement for usleep(): converts to a timespec and forwards to nanosleep() */
+int usleep(useconds_t useconds)
{
- return __sleep(seconds);
+ /* https://code.woboq.org/userspace/glibc/sysdeps/posix/usleep.c.html */
+ struct timespec ts = {
+ .tv_sec = (long int) (useconds / 1000000),
+ .tv_nsec = (long int) (useconds % 1000000) * 1000l,
+ };
+ return nanosleep(&ts, NULL);
}
-int usleep (useconds_t useconds)
+int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
{
- model->switch_to_master(
- new ModelAction(NOOP, std::memory_order_seq_cst, NULL)
- );
+	/* Under the model checker the sleep is simulated: the requested
+	 * duration (in ns) and the current monotonic time are recorded in a
+	 * THREAD_SLEEP action and the scheduler decides when to resume.
+	 * NOTE(review): when the checker is not initialized this returns 0
+	 * immediately without sleeping -- confirm that is intended. */
+	if (model) {
+		/* Requested duration in nanoseconds */
+		uint64_t time = rqtp->tv_sec * 1000000000 + rqtp->tv_nsec;
+		struct timespec currtime;
+		clock_gettime(CLOCK_MONOTONIC, &currtime);
+		uint64_t lcurrtime = currtime.tv_sec * 1000000000 + currtime.tv_nsec;
+		model->switch_to_master(new ModelAction(THREAD_SLEEP, std::memory_order_seq_cst, time, lcurrtime));
+		if (rmtp != NULL) {
+			/* Report the real elapsed time (typically near zero) as remainder */
+			clock_gettime(CLOCK_MONOTONIC, &currtime);
+			uint64_t lendtime = currtime.tv_sec * 1000000000 + currtime.tv_nsec;
+			uint64_t elapsed = lendtime - lcurrtime;
+			rmtp->tv_sec = elapsed / 1000000000;
+			rmtp->tv_nsec = elapsed - rmtp->tv_sec * 1000000000;
+		}
+	}
+
return 0;
}
#include "context.h"
#include "model.h"
+
+#if USE_MPROTECT_SNAPSHOT
+
/** PageAlignedAdressUpdate return a page aligned address for the
* address being added as a side effect the numBytes are also changed.
*/
return (void *)((((uintptr_t)addr) + PAGESIZE - 1) & ~(PAGESIZE - 1));
}
-#if USE_MPROTECT_SNAPSHOT
-
/* Each SnapShotRecord lists the firstbackingpage that must be written to
* revert to that snapshot */
struct SnapShotRecord {
* @see Thread::pending */
void set_pending(ModelAction *act) { pending = act; }
+ bool just_woken_up() { return wakeup_state; }
+ void set_wakeup_state(bool state) { wakeup_state = state; }
+
Thread * waiting_on() const;
bool is_waiting_on(const Thread *t) const;
*/
ModelAction *pending;
+ /** @brief True if this thread was just woken up */
+ bool wakeup_state;
+
void (*start_routine)(void *);
void *(*pstart_routine)(void *);
/* global "model" object */
#include "model.h"
#include "execution.h"
+#include "schedule.h"
#ifdef TLS
#include <dlfcn.h>
if (stack)
stack_free(stack);
#ifdef TLS
- if (this != model->getInitThread()) {
+ if (this != model->getInitThread() && !model->getParams()->threadsnocleanup) {
modellock = 1;
+ ASSERT(thread_current()==NULL);
+ Thread * curr_thread = model->getScheduler()->get_current_thread();
+ //Make any current_thread calls work in code to free
+ model->getScheduler()->set_current_thread(this);
real_pthread_mutex_unlock(&mutex2);
real_pthread_join(thread, NULL);
+ model->getScheduler()->set_current_thread(curr_thread);
modellock = 0;
}
#endif
--- /dev/null
+#include "waitobj.h"
+#include "threads-model.h"
+#include "funcnode.h"
+
+#define COUNTER_THRESHOLD 1000
+
+/** @brief Construct wait-relation bookkeeping for thread `tid` */
+WaitObj::WaitObj(thread_id_t tid) :
+	tid(tid),
+	waiting_for(32),
+	waited_by(32),
+	thrd_dist_maps(),
+	thrd_target_nodes(),
+	thrd_action_counters()
+{}
+
+/** @brief Free the per-thread distance maps and target-node sets */
+WaitObj::~WaitObj()
+{
+	for (uint i = 0; i < thrd_dist_maps.size(); i++)
+		delete thrd_dist_maps[i];
+
+	for (uint i = 0; i < thrd_target_nodes.size(); i++)
+		delete thrd_target_nodes[i];
+}
+
+/**
+ * @brief Record that this thread waits for `other` to reach `node`
+ * @param other The thread being waited for
+ * @param node The FuncNode that `other` should reach
+ * @param dist Distance estimate associated with `node` for `other`
+ */
+void WaitObj::add_waiting_for(thread_id_t other, FuncNode * node, int dist)
+{
+	waiting_for.add(other);
+
+	dist_map_t * dist_map = getDistMap(other);
+	dist_map->put(node, dist);
+
+	node_set_t * target_nodes = getTargetNodes(other);
+	target_nodes->add(node);
+}
+
+/** @brief Record that thread `other` waits for this thread */
+void WaitObj::add_waited_by(thread_id_t other)
+{
+	waited_by.add(other);
+}
+
+/**
+ * Stop waiting for the thread to reach the target node
+ *
+ * @param other The thread to be removed
+ * @param node The target node
+ * @return true if "other" is removed from waiting_for set
+ * false if only a target node of "other" is removed
+ */
+bool WaitObj::remove_waiting_for_node(thread_id_t other, FuncNode * node)
+{
+	dist_map_t * dist_map = getDistMap(other);
+	dist_map->remove(node);
+
+	node_set_t * target_nodes = getTargetNodes(other);
+	target_nodes->remove(node);
+
+	/* The thread has no nodes to reach */
+	if (target_nodes->isEmpty()) {
+		int index = id_to_int(other);
+		/* thrd_action_counters grows only lazily in incr_counter();
+		 * bounds-check so we never write past the end of the vector */
+		if ((uint) index < thrd_action_counters.size())
+			thrd_action_counters[index] = 0;
+		waiting_for.remove(other);
+
+		return true;
+	}
+
+	return false;
+}
+
+/* Stop waiting for the thread entirely: drop its target nodes and
+ * reset its action counter (distance maps are kept; see below) */
+void WaitObj::remove_waiting_for(thread_id_t other)
+{
+	waiting_for.remove(other);
+
+	// TODO: clear dist_map or not?
+	/* dist_map_t * dist_map = getDistMap(other);
+	   dist_map->reset(); */
+
+	node_set_t * target_nodes = getTargetNodes(other);
+	target_nodes->reset();
+
+	int index = id_to_int(other);
+	/* thrd_action_counters grows only lazily in incr_counter();
+	 * bounds-check so we never write past the end of the vector */
+	if ((uint) index < thrd_action_counters.size())
+		thrd_action_counters[index] = 0;
+}
+
+/** @brief Thread `other` no longer waits for this thread */
+void WaitObj::remove_waited_by(thread_id_t other)
+{
+	waited_by.remove(other);
+}
+
+/**
+ * @brief Look up the recorded distance from thread `tid` to `target`
+ * @return The stored distance, or -1 when `target` is not currently an
+ *         active target node for `tid`
+ */
+int WaitObj::lookup_dist(thread_id_t tid, FuncNode * target)
+{
+	dist_map_t * map = getDistMap(tid);
+	node_set_t * node_set = getTargetNodes(tid);
+
+	/* thrd_dist_maps is not reset when clear_waiting_for is called,
+	 * so node_set should be checked */
+	if (node_set->contains(target) && map->contains(target))
+		return map->get(target);
+
+	return -1;
+}
+
+/** @brief Return the distance map for `tid`, lazily growing the vector
+ *  and allocating empty maps for any new slots */
+dist_map_t * WaitObj::getDistMap(thread_id_t tid)
+{
+	int thread_id = id_to_int(tid);
+	int old_size = thrd_dist_maps.size();
+
+	if (old_size <= thread_id) {
+		thrd_dist_maps.resize(thread_id + 1);
+		for (int i = old_size; i < thread_id + 1; i++) {
+			thrd_dist_maps[i] = new dist_map_t(16);
+		}
+	}
+
+	return thrd_dist_maps[thread_id];
+}
+
+/** @brief Return the target-node set for `tid`, lazily growing the vector
+ *  and allocating empty sets for any new slots */
+node_set_t * WaitObj::getTargetNodes(thread_id_t tid)
+{
+	int thread_id = id_to_int(tid);
+	int old_size = thrd_target_nodes.size();
+
+	if (old_size <= thread_id) {
+		thrd_target_nodes.resize(thread_id + 1);
+		for (int i = old_size; i < thread_id + 1; i++) {
+			thrd_target_nodes[i] = new node_set_t(16);
+		}
+	}
+
+	return thrd_target_nodes[thread_id];
+}
+
+/**
+ * Increment action counter for thread tid
+ * @return true if the counter for tid expires
+ */
+bool WaitObj::incr_counter(thread_id_t tid)
+{
+	int thread_id = id_to_int(tid);
+
+	/* thrd_action_counters.resize does not work here */
+	while (thrd_action_counters.size() <= (uint) thread_id) {
+		thrd_action_counters.push_back(0);
+	}
+
+	thrd_action_counters[thread_id]++;
+	/* The counter expires (and resets) after COUNTER_THRESHOLD actions */
+	if (thrd_action_counters[thread_id] > COUNTER_THRESHOLD) {
+		thrd_action_counters[thread_id] = 0;
+		return true;
+	}
+
+	return false;
+}
+
+/** @brief Reset every waiting-for relation (counters and target nodes);
+ *  the waited_by relation is deliberately preserved */
+void WaitObj::clear_waiting_for()
+{
+	thrd_id_set_iter * iter = waiting_for.iterator();
+	while (iter->hasNext()) {
+		thread_id_t tid = iter->next();
+		int index = id_to_int(tid);
+		/* thrd_action_counters grows only lazily in incr_counter();
+		 * bounds-check so we never write past the end of the vector */
+		if ((uint) index < thrd_action_counters.size())
+			thrd_action_counters[index] = 0;
+
+		/* thrd_dist_maps are not reset because distances
+		 * will be overwritten when node targets are added
+		 * thrd_dist_maps[index]->reset(); */
+
+		node_set_t * target_nodes = getTargetNodes(tid);
+		target_nodes->reset();
+	}
+	/* iterator() returns a heap-allocated iterator; release it to avoid leaking */
+	delete iter;
+
+	waiting_for.reset();
+	/* waited_by relation should be kept */
+}
+
+/** @brief Print the threads this thread waits for; with `verbose`, also
+ *  print each thread's target nodes and distance estimates */
+void WaitObj::print_waiting_for(bool verbose)
+{
+	if (waiting_for.getSize() == 0)
+		return;
+
+	model_print("thread %d is waiting for: ", tid);
+	thrd_id_set_iter * it = waiting_for.iterator();
+
+	while (it->hasNext()) {
+		thread_id_t waiting_for_id = it->next();
+		model_print("%d ", waiting_for_id);
+	}
+	model_print("\n");
+	/* iterator() returns a heap-allocated iterator; release it */
+	delete it;
+
+	if (verbose) {
+		/* Print out the distances from each thread to target nodes */
+		model_print("\t");
+		for (uint i = 0; i < thrd_target_nodes.size(); i++) {
+			dist_map_t * dist_map = getDistMap(i);
+			node_set_t * node_set = getTargetNodes(i);
+			node_set_iter * node_iter = node_set->iterator();
+
+			if (!node_set->isEmpty()) {
+				model_print("[thread %d](", int_to_id(i));
+
+				while (node_iter->hasNext()){
+					FuncNode * node = node_iter->next();
+					int dist = dist_map->get(node);
+					model_print("node %d: %d, ", node->get_func_id(), dist);
+				}
+				model_print(") ");
+			}
+			/* One iterator is allocated per slot; release it each pass */
+			delete node_iter;
+		}
+		model_print("\n");
+	}
+}
+
+/** @brief Print the set of threads that wait for this thread */
+void WaitObj::print_waited_by()
+{
+	if (waited_by.getSize() == 0)
+		return;
+
+	model_print("thread %d is waited by: ", tid);
+	thrd_id_set_iter * it = waited_by.iterator();
+
+	while (it->hasNext()) {
+		thread_id_t thread_id = it->next();
+		model_print("%d ", thread_id);
+	}
+	/* iterator() returns a heap-allocated iterator; release it */
+	delete it;
+	model_print("\n");
+}
--- /dev/null
+#ifndef __WAITOBJ_H__
+#define __WAITOBJ_H__
+
+#include "classlist.h"
+#include "modeltypes.h"
+
+typedef HashTable<FuncNode *, int, uintptr_t, 0> dist_map_t;
+typedef HashSet<FuncNode *, uintptr_t, 0> node_set_t;
+typedef HSIterator<FuncNode *, uintptr_t, 0> node_set_iter;
+
+/**
+ * @brief Per-thread wait-relation bookkeeping: the threads this thread
+ * waits for / is waited by, the FuncNode targets those threads should
+ * reach (with distance estimates), and action counters used to expire
+ * stale waits.
+ */
+class WaitObj {
+public:
+ WaitObj(thread_id_t);
+ ~WaitObj();
+
+ thread_id_t get_tid() { return tid; }
+
+ void add_waiting_for(thread_id_t other, FuncNode * node, int dist);
+ void add_waited_by(thread_id_t other);
+ bool remove_waiting_for_node(thread_id_t other, FuncNode * node);
+ void remove_waiting_for(thread_id_t other);
+ void remove_waited_by(thread_id_t other);
+
+ thrd_id_set_t * getWaitingFor() { return &waiting_for; }
+ thrd_id_set_t * getWaitedBy() { return &waited_by; }
+
+ node_set_t * getTargetNodes(thread_id_t tid);
+ int lookup_dist(thread_id_t tid, FuncNode * target);
+
+ bool incr_counter(thread_id_t tid);
+
+ void clear_waiting_for();
+
+ void print_waiting_for(bool verbose = false);
+ void print_waited_by();
+
+ SNAPSHOTALLOC
+private:
+ thread_id_t tid;
+
+ /* The set of threads this thread (tid) is waiting for */
+ thrd_id_set_t waiting_for;
+
+ /* The set of threads waiting for this thread */
+ thrd_id_set_t waited_by;
+
+ SnapVector<dist_map_t *> thrd_dist_maps;
+ SnapVector<node_set_t *> thrd_target_nodes;
+
+ /* Count the number of actions for threads that
+ * this thread is waiting for */
+ SnapVector<uint32_t> thrd_action_counters;
+
+ dist_map_t * getDistMap(thread_id_t tid);
+};
+
+#endif