sleeps.o history.o funcnode.o funcinst.o printf.o
CPPFLAGS += -Iinclude -I.
-LDFLAGS := -ldl -lrt -rdynamic
+LDFLAGS := -ldl -lrt -rdynamic -lpthread
SHARED := -shared
# Mac OSX options
ASSERT(loc || type == ATOMIC_FENCE || type == NOOP);
Thread *t = thread ? thread : thread_current();
- this->tid = t->get_id();
+ this->tid = t!= NULL ? t->get_id() : -1;
}
typedef SnapList<uint32_t> func_id_list_t;
typedef SnapList<FuncInst *> func_inst_list_t;
-extern volatile int forklock;
+extern volatile int modellock;
#endif
void cds_volatile_store ## size (void * obj, uint ## size ## _t val, const char * position) { \
ensureModel(); \
model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, memory_order_relaxed, obj, (uint64_t) val)); \
- *((volatile uint ## size ## _t *)obj) = val; \
+ *((volatile uint ## size ## _t *)obj) = val; \
}
VOLATILESTORE(8)
/** Page size configuration */
#define PAGESIZE 4096
+#define TLS 1
+
/** Thread parameters */
/* Size of stack to allocate for a thread. */
addNodeEdge(fromnode, rmwnode, true);
}
+/** @brief Add modification-order edges from a set of actions to @a to,
+ *  pruning sources whose edge would be transitively redundant.
+ *
+ *  For every pair of actions in @a edgeset, if one already reaches the
+ *  other in the graph, the predecessor's direct edge to @a to is implied
+ *  transitively, so that action is dropped from the set.  Each surviving
+ *  action then gets a direct edge to @a to; the edge is forced when the
+ *  source and @a to belong to the same thread.
+ *
+ *  @param edgeset List of candidate edge sources; pruned in place
+ *  @param to The action all remaining edges point to
+ */
+void CycleGraph::addEdges(SnapList<ModelAction *> * edgeset, const ModelAction *to) {
+ for(SnapList<ModelAction*>::iterator it = edgeset->begin();it!=edgeset->end();) {
+ ModelAction *act = *it;
+ CycleNode *node = getNode(act);
+ SnapList<ModelAction*>::iterator it2 = it;
+ it2++;
+ for(;it2!=edgeset->end(); ) {
+ ModelAction *act2 = *it2;
+ CycleNode *node2 = getNode(act2);
+ if (checkReachable(node, node2)) {
+ // act already reaches act2, so act's direct edge to "to" is
+ // redundant; erase() yields the next outer element, and the
+ // goto skips the outer it++ so nothing is passed over.
+ it = edgeset->erase(it);
+ goto endouterloop;
+ } else if (checkReachable(node2, node)) {
+ // act2 reaches act: drop act2.  erase() already advanced it2,
+ // so the goto skips the inner it2++.
+ it2 = edgeset->erase(it2);
+ goto endinnerloop;
+ }
+ it2++;
+endinnerloop:
+ ;
+ }
+ it++;
+endouterloop:
+ ;
+ }
+ // Emit an edge from every surviving source to "to"; same-thread
+ // edges are forced.
+ for(SnapList<ModelAction*>::iterator it = edgeset->begin();it!=edgeset->end();it++) {
+ ModelAction *from = *it;
+ addEdge(from, to, from->get_tid() == to->get_tid());
+ }
+}
+
/**
* @brief Adds an edge between objects
*
public:
CycleGraph();
~CycleGraph();
+ void addEdges(SnapList<ModelAction *> * edgeset, const ModelAction *to);
void addEdge(const ModelAction *from, const ModelAction *to);
void addEdge(const ModelAction *from, const ModelAction *to, bool forceedge);
void addRMWEdge(const ModelAction *from, const ModelAction *rmw);
continue;
/* Establish hypothetical release sequences */
- ClockVector *cv = get_hb_from_write(act);
- if (curr->get_cv()->merge(cv))
+ ClockVector *cv = get_hb_from_write(act->get_reads_from());
+ if (cv != NULL && curr->get_cv()->merge(cv))
updated = true;
}
}
if (act->happens_before(curr)) {
if (i==0) {
if (last_sc_fence_local == NULL ||
- (*last_sc_fence_local < *prev_same_thread)) {
+ (*last_sc_fence_local < *act)) {
prev_same_thread = act;
}
}
unsigned int i;
ASSERT(curr->is_write());
+ SnapList<ModelAction *> edgeset;
+
if (curr->is_seqcst()) {
/* We have to at least see the last sequentially consistent write,
so we are initialized. */
ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
if (last_seq_cst != NULL) {
- mo_graph->addEdge(last_seq_cst, curr);
+ edgeset.push_back(last_seq_cst);
}
+ //update map for next query
+ obj_last_sc_map.put(curr->get_location(), curr);
}
/* Last SC fence in the current thread */
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- bool force_edge = false;
for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr) {
* 3) If normal write, we need to look at earlier actions, so
* continue processing list.
*/
- force_edge = true;
if (curr->is_rmw()) {
if (curr->get_reads_from() != NULL)
break;
/* C++, Section 29.3 statement 7 */
if (last_sc_fence_thread_before && act->is_write() &&
*act < *last_sc_fence_thread_before) {
- mo_graph->addEdge(act, curr, force_edge);
+ edgeset.push_back(act);
break;
}
* readfrom(act) --mo--> act
*/
if (act->is_write())
- mo_graph->addEdge(act, curr, force_edge);
+ edgeset.push_back(act);
else if (act->is_read()) {
//if previous read accessed a null, just keep going
- mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
+ edgeset.push_back(act->get_reads_from());
}
break;
}
}
}
+ mo_graph->addEdges(&edgeset, curr);
+
}
/**
void ModelExecution::add_write_to_lists(ModelAction *write) {
- // Update seq_cst map
- if (write->is_seqcst())
- obj_last_sc_map.put(write->get_location(), write);
-
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
int tid = id_to_int(write->get_tid());
if (tid >= (int)vec->size())
uint32_t last_func_id = (*thrd_func_list)[id].back();
if (last_func_id == func_id) {
- /* clear read map upon exiting functions */
FuncNode * func_node = func_nodes[func_id];
func_node->clear_read_map(tid);
if ( old_size < new_size )
func_nodes.resize(new_size);
- for (uint32_t id = old_size; id < new_size; id++) {
+ for (uint32_t id = old_size;id < new_size;id++) {
const char * func_name = func_map_rev[id];
FuncNode * func_node = new FuncNode();
func_node->set_func_id(id);
if (inst == NULL)
return;
- if (inst->is_read())
- func_node->store_read(act, tid);
+ // if (inst->is_read())
+ // func_node->store_read(act, tid);
if (inst->is_write())
add_to_write_history(act->get_location(), act->get_write_value());
{
memset(&stats,0,sizeof(struct execution_stats));
init_thread = new Thread(execution->get_next_id(), (thrd_t *) model_malloc(sizeof(thrd_t)), &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
+#ifdef TLS
+ init_thread->setTLS((char *)get_tls_addr());
+#endif
execution->add_thread(init_thread);
scheduler->set_current_thread(init_thread);
execution->setParams(¶ms);
*/
uint64_t ModelChecker::switch_to_master(ModelAction *act)
{
- if (forklock) {
+ if (modellock) {
static bool fork_message_printed = false;
if (!fork_message_printed) {
- model_print("Fork handler trying to call into model checker...\n");
+ model_print("Fork handler or dead thread trying to call into model checker...\n");
fork_message_printed = true;
}
delete act;
void ModelChecker::startMainThread() {
 init_thread->set_state(THREAD_RUNNING);
 scheduler->set_current_thread(init_thread);
- thread_startup();
+ // Go through main_thread_startup() so the initial THREAD_START action
+ // is recorded for the main thread even in the TLS build, where
+ // thread_startup() itself no longer emits it.
+ main_thread_startup();
}
static bool is_nonsc_write(const ModelAction *act) {
void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
void startMainThread();
void startChecker();
+ Thread * getInitThread() {return init_thread;}
MEMALLOC
private:
/** Flag indicates whether to restart the model checker. */
model = new ModelChecker();
model->startChecker();
}
-
+
ModelExecution *execution = model->get_execution();
cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
return m->try_lock();
model_snapshot_space = create_mspace(numheappages * PAGESIZE, 1);
}
-volatile int forklock = 0;
+volatile int modellock = 0;
static void fork_loop() {
/* switch back here when takesnapshot is called */
pid_t forkedID;
fork_snap->currSnapShotID = snapshotid + 1;
- forklock = 1;
+ modellock = 1;
forkedID = fork();
- forklock = 0;
+ modellock = 0;
if (0 == forkedID) {
setcontext(&fork_snap->shared_ctxt);
#include "stl-model.h"
#include "context.h"
#include "classlist.h"
+#include "pthread.h"
struct thread_params {
thrd_start_t func;
bool is_model_thread() const { return model_thread; }
friend void thread_startup();
+#ifdef TLS
+ friend void setup_context();
+ friend void * helper_thread(void *);
+ friend void finalize_helper_thread();
+#endif
/**
* Intentionally NOT allocated with MODELALLOC or SNAPSHOTALLOC.
void operator delete[](void *p, size_t size) {
Thread_free(p);
}
+#ifdef TLS
+ void setTLS(char *_tls) { tls = _tls;}
+#endif
private:
int create_context();
void *arg;
ucontext_t context;
void *stack;
+#ifdef TLS
+public:
+ char *tls;
+ ucontext_t helpercontext;
+ pthread_mutex_t mutex;
+ pthread_mutex_t mutex2;
+ pthread_t thread;
+private:
+#endif
thrd_t *user_thread;
thread_id_t id;
thread_state state;
const bool model_thread;
};
+#ifdef TLS
+uintptr_t get_tls_addr();
+#endif
+
Thread * thread_current();
void thread_startup();
+void main_thread_startup();
static inline thread_id_t thrd_to_id(thrd_t t)
{
/* global "model" object */
#include "model.h"
+#include "execution.h"
+
+#ifdef TLS
+#include <dlfcn.h>
+/** @brief Return the current thread's TLS base address.
+ *  Reads %fs:0 which, on x86-64 Linux, holds the thread control
+ *  block's self-pointer (i.e. the TLS base). x86-64-only code. */
+uintptr_t get_tls_addr() {
+ uintptr_t addr;
+ asm ("mov %%fs:0, %0" : "=r" (addr));
+ return addr;
+}
+
+#include <asm/prctl.h>
+#include <sys/prctl.h>
+extern "C" {
+int arch_prctl(int code, unsigned long addr);
+}
+/** @brief Install @a addr as the current thread's TLS base.
+ *  Sets the %fs segment base via arch_prctl(ARCH_SET_FS, ...) and then
+ *  rewrites the TCB self-pointer at %fs:0 so it matches the new base.
+ *  x86-64 Linux only. */
+static void set_tls_addr(uintptr_t addr) {
+ arch_prctl(ARCH_SET_FS, addr);
+ asm ("mov %0, %%fs:0" : : "r" (addr) : "memory");
+}
+#endif
/** Allocate a stack for a new thread. */
static void * stack_allocate(size_t size)
return model->get_current_thread();
}
+/** @brief Startup wrapper for the main (init) thread.
+ *  In the TLS build, thread_startup() no longer records the THREAD_START
+ *  action (non-main threads record it in setup_context() instead), so the
+ *  main thread must emit it here before entering thread_startup(). */
+void main_thread_startup() {
+#ifdef TLS
+ Thread * curr_thread = thread_current();
+ /* Add dummy "start" action, just to create a first clock vector */
+ model->switch_to_master(new ModelAction(THREAD_START, std::memory_order_seq_cst, curr_thread));
+#endif
+ thread_startup();
+}
+
/**
* Provides a startup wrapper for each thread, allowing some initial
* model-checking data to be recorded. This method also gets around makecontext
void thread_startup()
{
Thread * curr_thread = thread_current();
-
+#ifndef TLS
/* Add dummy "start" action, just to create a first clock vector */
model->switch_to_master(new ModelAction(THREAD_START, std::memory_order_seq_cst, curr_thread));
+#endif
/* Call the actual thread function */
if (curr_thread->start_routine != NULL) {
model->switch_to_master(new ModelAction(THREAD_FINISH, std::memory_order_seq_cst, curr_thread));
}
+#ifdef TLS
+/* Pointers to the real (uninstrumented) pthread implementations,
+ * resolved with dlsym(RTLD_NEXT, ...) in real_init_all().  The real_*
+ * wrappers below let the model checker's own infrastructure use
+ * pthreads without going through the interposed versions. */
+static int (*pthread_mutex_init_p)(pthread_mutex_t *__mutex, const pthread_mutexattr_t *__mutexattr) = NULL;
+
+/* Call the real pthread_mutex_init. */
+int real_pthread_mutex_init(pthread_mutex_t *__mutex, const pthread_mutexattr_t *__mutexattr) {
+ return pthread_mutex_init_p(__mutex, __mutexattr);
+}
+
+static int (*pthread_mutex_lock_p) (pthread_mutex_t *__mutex) = NULL;
+
+/* Call the real pthread_mutex_lock. */
+int real_pthread_mutex_lock (pthread_mutex_t *__mutex) {
+ return pthread_mutex_lock_p(__mutex);
+}
+
+static int (*pthread_mutex_unlock_p) (pthread_mutex_t *__mutex) = NULL;
+
+/* Call the real pthread_mutex_unlock. */
+int real_pthread_mutex_unlock (pthread_mutex_t *__mutex) {
+ return pthread_mutex_unlock_p(__mutex);
+}
+
+static int (*pthread_create_p) (pthread_t *__restrict, const pthread_attr_t *__restrict, void *(*)(void *), void * __restrict) = NULL;
+
+/* Call the real pthread_create. */
+int real_pthread_create (pthread_t *__restrict __newthread, const pthread_attr_t *__restrict __attr, void *(*__start_routine)(void *), void *__restrict __arg) {
+ return pthread_create_p(__newthread, __attr, __start_routine, __arg);
+}
+
+static int (*pthread_join_p) (pthread_t __th, void ** __thread_return) = NULL;
+
+/* Call the real pthread_join. */
+int real_pthread_join (pthread_t __th, void ** __thread_return) {
+ return pthread_join_p(__th, __thread_return);
+}
+
+/** @brief Resolve one real pthread symbol via dlsym(RTLD_NEXT, ...).
+ *
+ *  Per the POSIX dlsym error protocol, dlerror() is invoked first to
+ *  clear any stale error left by an earlier dl* call; otherwise a
+ *  successful lookup here could be misreported as a failure (and a
+ *  failed lookup with a stale-cleared error could go unnoticed,
+ *  leaving a NULL function pointer).  Exits on failure. */
+static void * lookup_real(const char * name) {
+ dlerror(); /* clear any pending error state */
+ void * sym = dlsym(RTLD_NEXT, name);
+ char * error = dlerror();
+ if (error != NULL) {
+ fputs(error, stderr);
+ exit(EXIT_FAILURE);
+ }
+ return sym;
+}
+
+/** @brief Lazily resolve all real pthread implementations used by the
+ *  real_* wrappers.  Safe to call repeatedly; already-resolved
+ *  pointers are left untouched. */
+void real_init_all() {
+ if (!pthread_mutex_init_p)
+ pthread_mutex_init_p = (int (*)(pthread_mutex_t *__mutex, const pthread_mutexattr_t *__mutexattr))lookup_real("pthread_mutex_init");
+ if (!pthread_mutex_lock_p)
+ pthread_mutex_lock_p = (int (*)(pthread_mutex_t *__mutex))lookup_real("pthread_mutex_lock");
+ if (!pthread_mutex_unlock_p)
+ pthread_mutex_unlock_p = (int (*)(pthread_mutex_t *__mutex))lookup_real("pthread_mutex_unlock");
+ if (!pthread_create_p)
+ pthread_create_p = (int (*)(pthread_t *__restrict, const pthread_attr_t *__restrict, void *(*)(void *), void *__restrict))lookup_real("pthread_create");
+ if (!pthread_join_p)
+ pthread_join_p = (int (*)(pthread_t __th, void ** __thread_return))lookup_real("pthread_join");
+}
+
+/** @brief Runs on the helper pthread inside the managed helpercontext.
+ *
+ *  Publishes this real thread's TLS base address (under mutex) so
+ *  setup_context()'s poll loop can observe it, then parks on mutex2
+ *  until the kernel-side thread is told to finish (mutex2 is unlocked
+ *  during Thread teardown), and finally jumps back to the thread's
+ *  saved context inside helper_thread(). */
+void finalize_helper_thread() {
+ Thread * curr_thread = thread_current();
+ real_pthread_mutex_lock(&curr_thread->mutex);
+ curr_thread->tls = (char *) get_tls_addr();
+ real_pthread_mutex_unlock(&curr_thread->mutex);
+ //Wait in the kernel until it is time for us to finish
+ real_pthread_mutex_lock(&curr_thread->mutex2);
+ real_pthread_mutex_unlock(&curr_thread->mutex2);
+ //return to helper thread function
+ setcontext(&curr_thread->context);
+}
+
+/** @brief Entry point of the real pthread backing a model-checked thread.
+ *
+ *  Builds a fresh ucontext (helpercontext) whose entry is
+ *  finalize_helper_thread(), then swaps into it; finalize_helper_thread
+ *  captures this real thread's TLS address and blocks until teardown.
+ *  When control swaps back here, the managed thread body is run via
+ *  thread_startup(), after which the helper stack is released.
+ *
+ *  @param ptr Unused pthread argument
+ *  @return NULL always */
+void * helper_thread(void * ptr) {
+ Thread * curr_thread = thread_current();
+
+ //build a context for this real thread so we can take its context
+ int ret = getcontext(&curr_thread->helpercontext);
+ ASSERT(!ret);
+
+ /* Initialize new managed context */
+ void *helperstack = stack_allocate(STACK_SIZE);
+ curr_thread->helpercontext.uc_stack.ss_sp = helperstack;
+ curr_thread->helpercontext.uc_stack.ss_size = STACK_SIZE;
+ curr_thread->helpercontext.uc_stack.ss_flags = 0;
+ curr_thread->helpercontext.uc_link = model->get_system_context();
+ makecontext(&curr_thread->helpercontext, finalize_helper_thread, 0);
+
+ model_swapcontext(&curr_thread->context, &curr_thread->helpercontext);
+
+ //start the real thread
+ thread_startup();
+
+ //now the real thread has control again
+ stack_free(helperstack);
+
+ return NULL;
+}
+
+/** @brief First code run in a newly created managed thread's context
+ *  (TLS build).
+ *
+ *  Records the THREAD_START action, resolves the real pthread
+ *  functions, then spawns a real helper pthread whose only job is to
+ *  donate a valid per-thread TLS block.  This context polls (under
+ *  mutex) until the helper has published its TLS address, installs
+ *  that address as our own TLS base, and re-enters this thread's
+ *  context, which now resumes inside helper_thread() to run the user
+ *  code.
+ *
+ *  NOTE(review): the wait is a busy-poll lock/check/unlock loop; a
+ *  condition variable would avoid spinning — confirm whether the spin
+ *  is intentional here. */
+void setup_context() {
+ Thread * curr_thread = thread_current();
+
+ /* Add dummy "start" action, just to create a first clock vector */
+ model->switch_to_master(new ModelAction(THREAD_START, std::memory_order_seq_cst, curr_thread));
+
+ real_init_all();
+
+ /* Initialize our lock */
+ real_pthread_mutex_init(&curr_thread->mutex, NULL);
+ real_pthread_mutex_init(&curr_thread->mutex2, NULL);
+ // mutex2 is taken now and only released at teardown; the helper
+ // thread blocks on it in finalize_helper_thread().
+ real_pthread_mutex_lock(&curr_thread->mutex2);
+
+ /* Create the real thread */
+ real_pthread_create(&curr_thread->thread, NULL, helper_thread, NULL);
+ bool notdone = true;
+ while(notdone) {
+ real_pthread_mutex_lock(&curr_thread->mutex);
+ if (curr_thread->tls != NULL)
+ notdone = false;
+ real_pthread_mutex_unlock(&curr_thread->mutex);
+ }
+
+ set_tls_addr((uintptr_t)curr_thread->tls);
+ setcontext(&curr_thread->context);
+}
+#endif
+
/**
* Create a thread context for a new thread so we can use
* setcontext/getcontext/swapcontext to swap it out.
context.uc_stack.ss_size = STACK_SIZE;
context.uc_stack.ss_flags = 0;
context.uc_link = model->get_system_context();
+#ifdef TLS
+ if (model != NULL)
+ makecontext(&context, setup_context, 0);
+ else
+ makecontext(&context, main_thread_startup, 0);
+#else
makecontext(&context, thread_startup, 0);
+#endif
return 0;
}
int Thread::swap(Thread *t, ucontext_t *ctxt)
{
t->set_state(THREAD_READY);
+#ifdef TLS
+ set_tls_addr((uintptr_t)model->getInitThread()->tls);
+#endif
return model_swapcontext(&t->context, ctxt);
}
int Thread::swap(ucontext_t *ctxt, Thread *t)
{
t->set_state(THREAD_RUNNING);
+#ifdef TLS
+ if (t->tls != NULL)
+ set_tls_addr((uintptr_t)t->tls);
+#endif
return model_swapcontext(ctxt, &t->context);
}
state = THREAD_COMPLETED;
if (stack)
stack_free(stack);
+#ifdef TLS
+ if (this != model->getInitThread()) {
+ modellock = 1;
+ real_pthread_mutex_unlock(&mutex2);
+ real_pthread_join(thread, NULL);
+ modellock = 0;
+ }
+#endif
}
/**
start_routine(NULL),
arg(NULL),
stack(NULL),
+#ifdef TLS
+ tls(NULL),
+#endif
user_thread(NULL),
id(tid),
state(THREAD_READY), /* Thread is always ready? */
start_routine(func),
pstart_routine(NULL),
arg(a),
+#ifdef TLS
+ tls(NULL),
+#endif
user_thread(t),
id(tid),
state(THREAD_CREATED),
start_routine(NULL),
pstart_routine(func),
arg(a),
+#ifdef TLS
+ tls(NULL),
+#endif
user_thread(t),
id(tid),
state(THREAD_CREATED),