#define ACTION_INITIAL_CLOCK 0
-ModelAction::ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value) :
+/**
+ * @brief Construct a new ModelAction
+ *
+ * @param type The type of action
+ * @param order The memory order of this action. A "don't care" for non-ATOMIC
+ * actions (e.g., THREAD_* or MODEL_* actions).
+ * @param loc The location that this action acts upon
+ * @param value (optional) A value associated with the action (e.g., the value
+ * read or written). Defaults to a given macro constant, for debugging purposes.
+ * @param thread (optional) The Thread in which this action occurred. If NULL
+ * (default), then a Thread is assigned according to the scheduler.
+ */
+ModelAction::ModelAction(action_type_t type, memory_order order, void *loc,
+ uint64_t value, Thread *thread) :
type(type),
order(order),
location(loc),
seq_number(ACTION_INITIAL_CLOCK),
cv(NULL)
{
- Thread *t = thread_current();
+ Thread *t = thread ? thread : thread_current();
this->tid = t->get_id();
}
seq_number = num;
}
+/** @return True if this is a model-checker release sequence fixup action
+ *  (type MODEL_FIXUP_RELSEQ) */
+bool ModelAction::is_relseq_fixup() const
+{
+	return type == MODEL_FIXUP_RELSEQ;
+}
+
bool ModelAction::is_mutex_op() const
{
return type == ATOMIC_LOCK || type == ATOMIC_TRYLOCK || type == ATOMIC_UNLOCK;
{
const char *type_str, *mo_str;
switch (this->type) {
+ case MODEL_FIXUP_RELSEQ:
+ type_str = "relseq fixup";
+ break;
case THREAD_CREATE:
type_str = "thread create";
break;
#include "modeltypes.h"
class ClockVector;
+class Thread;
using std::memory_order;
using std::memory_order_relaxed;
/** @brief Represents an action type, identifying one of several types of
* ModelAction */
typedef enum action_type {
+ MODEL_FIXUP_RELSEQ, /**< Special ModelAction: finalize a release
+ * sequence */
THREAD_CREATE, /**< A thread creation action */
THREAD_START, /**< First action in each thread */
THREAD_YIELD, /**< A thread yield action */
*/
class ModelAction {
public:
- ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE);
+ ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE, Thread *thread = NULL);
~ModelAction();
void print() const;
void copy_from_new(ModelAction *newaction);
void set_seq_number(modelclock_t num);
void set_try_lock(bool obtainedlock);
+ bool is_relseq_fixup() const;
bool is_mutex_op() const;
bool is_lock() const;
bool is_trylock() const;
#include "common.h"
#include "model.h"
+#include "stacktrace.h"
#define MAX_TRACE_LEN 100
+#define CONFIG_STACKTRACE
/** Print a backtrace of the current program state. */
void print_trace(void)
{
+#ifdef CONFIG_STACKTRACE
+ print_stacktrace(stdout);
+#else
void *array[MAX_TRACE_LEN];
char **strings;
int size, i;
printf("\t%s\n", strings[i]);
free(strings);
+#endif /* CONFIG_STACKTRACE */
}
void model_print_summary(void)
priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
/* First thread created will have id INITIAL_THREAD_ID */
priv->next_thread_id = INITIAL_THREAD_ID;
+
+ /* Initialize a model-checker thread, for special ModelActions */
+ model_thread = new Thread(get_next_id());
+ thread_map->put(id_to_int(model_thread->get_id()), model_thread);
}
/** @brief Destructor */
/* The next node will try to read from a different future value. */
tid = next->get_tid();
node_stack->pop_restofstack(2);
+ } else if (nextnode->increment_relseq_break()) {
+ /* The next node will try to resolve a release sequence differently */
+ tid = next->get_tid();
+ node_stack->pop_restofstack(2);
} else {
/* Make a different thread execute for next step */
scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
num_feasible_executions++;
}
- DEBUG("Number of acquires waiting on pending release sequences: %lu\n",
+ DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
pending_rel_seqs->size());
if (isfinalfeasible() || DBG_ENABLED())
return updated;
}
+/**
+ * @brief Process the current action for release sequence fixup activity
+ *
+ * Performs model-checker release sequence fixups for the current action,
+ * forcing a single pending release sequence to break (with a given, potential
+ * "loose" write) or to complete (i.e., synchronize). If a pending release
+ * sequence forms a complete release sequence, then we must perform the fixup
+ * synchronization, mo_graph additions, etc.
+ *
+ * @param curr The current action; must be a release sequence fixup action
+ * @param work_queue The work queue to which to add work items as they are
+ * generated
+ */
+void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
+{
+	/* The Node replay machinery chooses which write (if any) breaks the
+	 * sequence; NULL means "force synchronization" (see
+	 * compute_relseq_breakwrites()) */
+	const ModelAction *write = curr->get_node()->get_relseq_break();
+	/* Only the most recently-recorded pending release sequence is fixed
+	 * up; it is removed from the pending list here */
+	struct release_seq *sequence = pending_rel_seqs->back();
+	pending_rel_seqs->pop_back();
+	ASSERT(sequence);
+	ModelAction *acquire = sequence->acquire;
+	const ModelAction *rf = sequence->rf;
+	const ModelAction *release = sequence->release;
+	ASSERT(acquire);
+	ASSERT(release);
+	ASSERT(rf);
+	ASSERT(release->same_thread(rf));
+
+	if (write == NULL) {
+		/**
+		 * @todo Forcing a synchronization requires that we set
+		 * modification order constraints. For instance, we can't allow
+		 * a fixup sequence in which two separate read-acquire
+		 * operations read from the same sequence, where the first one
+		 * synchronizes and the other doesn't. Essentially, we can't
+		 * allow any writes to insert themselves between 'release' and
+		 * 'rf'
+		 */
+
+		/* Must synchronize */
+		if (!acquire->synchronize_with(release)) {
+			set_bad_synchronization();
+			return;
+		}
+		/* Re-check all pending release sequences */
+		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
+		/* Re-check act for mo_graph edges */
+		work_queue->push_back(MOEdgeWorkEntry(acquire));
+
+		/* propagate synchronization to later actions: every action
+		 * after 'acquire' that happens-after it inherits the new
+		 * synchronization */
+		action_list_t::reverse_iterator rit = action_trace->rbegin();
+		for (; (*rit) != acquire; rit++) {
+			ModelAction *propagate = *rit;
+			if (acquire->happens_before(propagate)) {
+				propagate->synchronize_with(acquire);
+				/* Re-check 'propagate' for mo_graph edges */
+				work_queue->push_back(MOEdgeWorkEntry(propagate));
+			}
+		}
+	} else {
+		/* Break release sequence with new edges:
+		 * release --mo--> write --mo--> rf */
+		mo_graph->addEdge(release, write);
+		mo_graph->addEdge(write, rf);
+	}
+
+	/* See if we have realized a data race */
+	if (checkDataRaces())
+		set_assert();
+}
+
/**
* Initialize the current action by performing one or more of the following
* actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
*/
if (newcurr->is_write())
compute_promises(newcurr);
+ else if (newcurr->is_relseq_fixup())
+ compute_relseq_breakwrites(newcurr);
}
return newcurr;
}
if (act->is_mutex_op() && process_mutex(act))
update_all = true;
+ if (act->is_relseq_fixup())
+ process_relseq_fixup(curr, &work_queue);
+
if (update_all)
work_queue.push_back(CheckRelSeqWorkEntry(NULL));
else if (update)
if ((!parnode->backtrack_empty() ||
!currnode->read_from_empty() ||
!currnode->future_value_empty() ||
- !currnode->promise_empty())
+ !currnode->promise_empty() ||
+ !currnode->relseq_break_empty())
&& (!priv->next_backtrack ||
*curr > *priv->next_backtrack)) {
priv->next_backtrack = curr;
bool future_ordered = false;
ModelAction *last = get_last_action(int_to_id(i));
- if (last && (rf->happens_before(last) ||
- get_thread(int_to_id(i))->is_complete()))
+ Thread *th = get_thread(int_to_id(i));
+ if ((last && rf->happens_before(last)) ||
+ !scheduler->is_enabled(th) ||
+ th->is_complete())
future_ordered = true;
+ ASSERT(!th->is_model_thread() || future_ordered);
+
for (rit = list->rbegin(); rit != list->rend(); rit++) {
const ModelAction *act = *rit;
/* Reach synchronization -> this thread is complete */
}
}
+/**
+ * Compute the set of writes that may break the current pending release
+ * sequence. This information is extracted from previous release sequence
+ * calculations.
+ *
+ * @param curr The current ModelAction. Must be a release sequence fixup
+ * action.
+ */
+void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
+{
+	if (pending_rel_seqs->empty())
+		return;
+
+	/* Only the most recently-recorded pending release sequence is
+	 * considered; each of its candidate writes becomes a replay option */
+	struct release_seq *pending = pending_rel_seqs->back();
+	for (unsigned int i = 0; i < pending->writes.size(); i++) {
+		const ModelAction *write = pending->writes[i];
+		curr->get_node()->add_relseq_break(write);
+	}
+
+	/* NULL means don't break the sequence; just synchronize */
+	curr->get_node()->add_relseq_break(NULL);
+}
+
/**
* Build up an initial set of all past writes that this 'read' action may read
* from. This set is determined by the clock vector's "happens before"
if (has_asserted())
return false;
- Thread *curr = thread_current();
+ Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
if (curr) {
if (curr->get_state() == THREAD_READY) {
ASSERT(priv->current_action);
DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
next ? id_to_int(next->get_id()) : -1);
+ /*
+ * Launch end-of-execution release sequence fixups only when there are:
+ *
+ * (1) no more user threads to run (or when execution replay chooses
+ * the 'model_thread')
+ * (2) pending release sequences
+ * (3) pending assertions (i.e., data races)
+ * (4) no pending promises
+ */
+ if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
+ isfinalfeasible() && !unrealizedraces.empty()) {
+		printf("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
+ pending_rel_seqs->size());
+ ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
+ std::memory_order_seq_cst, NULL, VALUE_NONE,
+ model_thread);
+ set_current_action(fixup);
+ return true;
+ }
+
/* next == NULL -> don't take any more steps */
if (!next)
return false;
bool process_write(ModelAction *curr);
bool process_mutex(ModelAction *curr);
bool process_thread_action(ModelAction *curr);
+ void process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue);
bool check_action_enabled(ModelAction *curr);
bool take_step();
void reset_to_initial_state();
bool resolve_promises(ModelAction *curr);
void compute_promises(ModelAction *curr);
+ void compute_relseq_breakwrites(ModelAction *curr);
void check_curr_backtracking(ModelAction * curr);
void add_action_to_lists(ModelAction *act);
* together for efficiency and maintainability. */
struct model_snapshot_members *priv;
+	/** A special model-checker Thread; used for associating with
+	 * model-checker-related ModelActions */
+ Thread *model_thread;
+
/**
* @brief The modification order graph
*
may_read_from(),
read_from_index(0),
future_values(),
- future_index(-1)
+ future_index(-1),
+ relseq_break_writes(),
+ relseq_break_index(0)
{
if (act) {
act->set_node(this);
return false;
}
+/**
+ * Add a write ModelAction to the set of writes that may break the release
+ * sequence. This is used during replay exploration of pending release
+ * sequences. This Node must correspond to a release sequence fixup action.
+ *
+ * @param write The write that may break the release sequence. NULL means we
+ * allow the release sequence to synchronize.
+ * @see Node::get_relseq_break
+ */
+void Node::add_relseq_break(const ModelAction *write)
+{
+	relseq_break_writes.push_back(write);
+}
+
+/**
+ * Get the write that may break the current pending release sequence,
+ * according to the replay / divergence pattern.
+ *
+ * @return A write that may break the release sequence. If NULL, that means
+ * the release sequence should not be broken.
+ */
+const ModelAction * Node::get_relseq_break()
+{
+	/* Past the last recorded write, fall back to NULL (i.e., do not
+	 * break the sequence) */
+	if (relseq_break_index < (int)relseq_break_writes.size())
+		return relseq_break_writes[relseq_break_index];
+	else
+		return NULL;
+}
+
+/**
+ * Increments the index into the relseq_break_writes set to explore the next
+ * item.
+ * @return Returns false if we have explored all values.
+ */
+bool Node::increment_relseq_break()
+{
+	DBG();
+	/* NOTE(review): promises are discarded when diverging to a new
+	 * release sequence break — confirm this mirrors the other
+	 * increment_*() backtracking methods */
+	promises.clear();
+	if (relseq_break_index < ((int)relseq_break_writes.size())) {
+		relseq_break_index++;
+		return (relseq_break_index < ((int)relseq_break_writes.size()));
+	}
+	return false;
+}
+
+/**
+ * @return True if all writes that may break the release sequence have been
+ * explored
+ */
+bool Node::relseq_break_empty() {
+	/* "Empty" once the index has reached the final option (the last
+	 * entry is the NULL/synchronize choice appended by
+	 * compute_relseq_breakwrites()) */
+	return ((relseq_break_index + 1) >= ((int)relseq_break_writes.size()));
+}
+
void Node::explore(thread_id_t tid)
{
int i = id_to_int(tid);
bool promise_empty();
enabled_type_t *get_enabled_array() {return enabled_array;}
+ void add_relseq_break(const ModelAction *write);
+ const ModelAction * get_relseq_break();
+ bool increment_relseq_break();
+ bool relseq_break_empty();
+
void print();
void print_may_read_from();
std::vector< struct future_value, ModelAlloc<struct future_value> > future_values;
std::vector< promise_t, ModelAlloc<promise_t> > promises;
int future_index;
+
+ std::vector< const ModelAction *, ModelAlloc<const ModelAction *> > relseq_break_writes;
+ int relseq_break_index;
};
typedef std::vector< Node *, ModelAlloc< Node * > > node_list_t;
/** Constructor */
Scheduler::Scheduler() :
- is_enabled(NULL),
+ enabled(NULL),
enabled_len(0),
curr_thread_index(0),
current(NULL)
if (threadid>=enabled_len) {
enabled_type_t *new_enabled = (enabled_type_t *)snapshot_malloc(sizeof(enabled_type_t) * (threadid + 1));
memset(&new_enabled[enabled_len], 0, (threadid+1-enabled_len)*sizeof(enabled_type_t));
- if (is_enabled != NULL) {
- memcpy(new_enabled, is_enabled, enabled_len*sizeof(enabled_type_t));
- snapshot_free(is_enabled);
+ if (enabled != NULL) {
+ memcpy(new_enabled, enabled, enabled_len*sizeof(enabled_type_t));
+ snapshot_free(enabled);
}
- is_enabled=new_enabled;
+ enabled=new_enabled;
enabled_len=threadid+1;
}
- is_enabled[threadid]=enabled_status;
+ enabled[threadid]=enabled_status;
+}
+
+/**
+ * @brief Check if a Thread is currently enabled
+ *
+ * Thread IDs beyond the enabled array (i.e., not yet registered with the
+ * scheduler) are reported as not enabled. Note that a Thread in the sleep
+ * set (THREAD_SLEEP_SET) still counts as enabled here, since only
+ * THREAD_DISABLED is excluded.
+ *
+ * @param t The Thread to check
+ * @return True if the Thread is currently enabled
+ */
+bool Scheduler::is_enabled(Thread *t) const
+{
+	int id = id_to_int(t->get_id());
+	return (id >= enabled_len) ? false : (enabled[id] != THREAD_DISABLED);
}
enabled_type_t Scheduler::get_enabled(Thread *t) {
- return is_enabled[id_to_int(t->get_id())];
+ return enabled[id_to_int(t->get_id())];
}
void Scheduler::update_sleep_set(Node *n) {
enabled_type_t *enabled_array=n->get_enabled_array();
for(int i=0;i<enabled_len;i++) {
if (enabled_array[i]==THREAD_SLEEP_SET) {
- is_enabled[i]=THREAD_SLEEP_SET;
+ enabled[i]=THREAD_SLEEP_SET;
}
}
}
void Scheduler::add_thread(Thread *t)
{
DEBUG("thread %d\n", id_to_int(t->get_id()));
+ ASSERT(!t->is_model_thread());
set_enabled(t, THREAD_ENABLED);
}
*/
void Scheduler::wake(Thread *t)
{
+ ASSERT(!t->is_model_thread());
set_enabled(t, THREAD_DISABLED);
t->set_state(THREAD_READY);
}
int old_curr_thread = curr_thread_index;
while(true) {
curr_thread_index = (curr_thread_index+1) % enabled_len;
- if (is_enabled[curr_thread_index]==THREAD_ENABLED) {
+ if (enabled[curr_thread_index]==THREAD_ENABLED) {
t = model->get_thread(int_to_id(curr_thread_index));
break;
}
return NULL;
}
}
+ } else if (t->is_model_thread()) {
+ /* model-checker threads never run */
+ t = NULL;
} else {
curr_thread_index = id_to_int(t->get_id());
}
*/
Thread * Scheduler::get_current_thread() const
{
+ ASSERT(!current || !current->is_model_thread());
return current;
}
Thread * next_thread(Thread *t);
Thread * get_current_thread() const;
void print() const;
- enabled_type_t * get_enabled() { return is_enabled; };
+ enabled_type_t * get_enabled() { return enabled; };
void remove_sleep(Thread *t);
void add_sleep(Thread *t);
enabled_type_t get_enabled(Thread *t);
void update_sleep_set(Node *n);
+ bool is_enabled(Thread *t) const;
+
SNAPSHOTALLOC
private:
/** The list of available Threads that are not currently running */
- enabled_type_t * is_enabled;
+ enabled_type_t *enabled;
int enabled_len;
int curr_thread_index;
void set_enabled(Thread *t, enabled_type_t enabled_status);
--- /dev/null
+// stacktrace.h (c) 2008, Timo Bingmann from http://idlebox.net/
+// published under the WTFPL v2.0
+
+#ifndef __STACKTRACE_H__
+#define __STACKTRACE_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <execinfo.h>
+#include <cxxabi.h>
+
+/** Print a demangled stack backtrace of the caller function to FILE* out.
+ *  NOTE: relies on glibc backtrace()/backtrace_symbols() (<execinfo.h>) and
+ *  the Itanium C++ ABI demangler abi::__cxa_demangle() (<cxxabi.h>). */
+static inline void print_stacktrace(FILE *out = stderr, unsigned int max_frames = 63)
+{
+    fprintf(out, "stack trace:\n");
+
+    // storage array for stack trace address data
+    // (runtime-sized array: a GCC/Clang VLA extension in C++)
+    void* addrlist[max_frames+1];
+
+    // retrieve current stack addresses
+    int addrlen = backtrace(addrlist, sizeof(addrlist) / sizeof(void*));
+
+    if (addrlen == 0) {
+	fprintf(out, " <empty, possibly corrupt>\n");
+	return;
+    }
+
+    // resolve addresses into strings containing "filename(function+address)",
+    // this array must be free()-ed
+    char** symbollist = backtrace_symbols(addrlist, addrlen);
+
+    // allocate string which will be filled with the demangled function name
+    size_t funcnamesize = 256;
+    char* funcname = (char*)malloc(funcnamesize);
+
+    // iterate over the returned symbol lines. skip the first, it is the
+    // address of this function.
+    for (int i = 1; i < addrlen; i++) {
+	char *begin_name = 0, *begin_offset = 0, *end_offset = 0;
+
+	// find parentheses and +address offset surrounding the mangled name:
+	// ./module(function+0x15c) [0x8048a6d]
+	for (char *p = symbollist[i]; *p; ++p) {
+	    if (*p == '(')
+		begin_name = p;
+	    else if (*p == '+')
+		begin_offset = p;
+	    else if (*p == ')' && begin_offset) {
+		end_offset = p;
+		break;
+	    }
+	}
+
+	if (begin_name && begin_offset && end_offset && begin_name < begin_offset) {
+	    *begin_name++ = '\0';
+	    *begin_offset++ = '\0';
+	    *end_offset = '\0';
+
+	    // mangled name is now in [begin_name, begin_offset) and caller
+	    // offset in [begin_offset, end_offset). now apply
+	    // __cxa_demangle():
+
+	    int status;
+	    char* ret = abi::__cxa_demangle(begin_name,
+		    funcname, &funcnamesize, &status);
+	    if (status == 0) {
+		funcname = ret; // use possibly realloc()-ed string
+		fprintf(out, " %s : %s+%s\n",
+			symbollist[i], funcname, begin_offset);
+	    } else {
+		// demangling failed. Output function name as a C function with
+		// no arguments.
+		fprintf(out, " %s : %s()+%s\n",
+			symbollist[i], begin_name, begin_offset);
+	    }
+	} else {
+	    // couldn't parse the line? print the whole line.
+	    fprintf(out, " %s\n", symbollist[i]);
+	}
+    }
+
+    free(funcname);
+    free(symbollist);
+}
+
+#endif // __STACKTRACE_H__
--- /dev/null
+/*
+ * This test performs some relaxed, release, acquire operations on a single
+ * atomic variable. It can give some rough idea of release sequence support but
+ * probably should be improved to give better information.
+ *
+ * This test tries to establish two release sequences, where we should always
+ * either establish both or establish neither. (Note that this is only true for
+ * a few executions of interest, where both load-acquire's read from the same
+ * write.)
+ */
+
+#include <stdio.h>
+
+#include "libthreads.h"
+#include "librace.h"
+#include "stdatomic.h"
+
+atomic_int x;
+int var = 0;
+
+/* Writer: non-atomic store to 'var', then a release store to 'x' followed by
+ * a relaxed store (which may continue the release sequence) */
+static void a(void *obj)
+{
+	store_32(&var, 1);
+	atomic_store_explicit(&x, 1, memory_order_release);
+	atomic_store_explicit(&x, 42, memory_order_relaxed);
+}
+
+/* Reader: load-acquire 'x', then read the non-atomic 'var' */
+static void b(void *obj)
+{
+	int r = atomic_load_explicit(&x, memory_order_acquire);
+	printf("r = %u\n", r);
+	printf("load %d\n", load_32(&var));
+}
+
+/* Interfering relaxed store from a third thread (may break the release
+ * sequence) */
+static void c(void *obj)
+{
+	atomic_store_explicit(&x, 2, memory_order_relaxed);
+}
+
+void user_main()
+{
+	thrd_t t1, t2, t3, t4;
+
+	atomic_init(&x, 0);
+
+	printf("Thread %d: creating 4 threads\n", thrd_current());
+	thrd_create(&t1, (thrd_start_t)&a, NULL);
+	thrd_create(&t2, (thrd_start_t)&b, NULL);
+	thrd_create(&t3, (thrd_start_t)&b, NULL);
+	thrd_create(&t4, (thrd_start_t)&c, NULL);
+
+	thrd_join(t1);
+	thrd_join(t2);
+	thrd_join(t3);
+	thrd_join(t4);
+	printf("Thread %d is finished\n", thrd_current());
+}
#include "stdatomic.h"
atomic_int x;
+int var = 0;
static void a(void *obj)
{
+ store_32(&var, 1);
atomic_store_explicit(&x, *((int *)obj), memory_order_release);
atomic_store_explicit(&x, *((int *)obj) + 1, memory_order_relaxed);
}
{
int r = atomic_load_explicit(&x, memory_order_acquire);
printf("r = %u\n", r);
+ store_32(&var, 3);
}
static void b1(void *obj)
int i = 7;
int r = atomic_load_explicit(&x, memory_order_acquire);
printf("r = %u\n", r);
+ store_32(&var, 2);
thrd_create(&t3, (thrd_start_t)&a, &i);
thrd_create(&t4, (thrd_start_t)&b2, NULL);
thrd_join(t3);
#include "stdatomic.h"
atomic_int x;
+int var = 0;
static void a(void *obj)
{
+ store_32(&var, 1);
atomic_store_explicit(&x, 1, memory_order_release);
atomic_store_explicit(&x, 42, memory_order_relaxed);
}
{
int r = atomic_load_explicit(&x, memory_order_acquire);
printf("r = %u\n", r);
+ printf("load %d\n", load_32(&var));
}
static void c(void *obj)
* @brief Thread functions.
*/
+#include <string.h>
+
#include "libthreads.h"
#include "common.h"
#include "threads.h"
}
}
+/**
+ * @brief Construct a new model-checker Thread
+ *
+ * A model-checker Thread is used for accounting purposes only. It will never
+ * have its own stack, and it should never be inserted into the Scheduler.
+ *
+ * @param tid The thread ID to assign
+ */
+Thread::Thread(thread_id_t tid) :
+	parent(NULL),
+	creation(NULL),
+	pending(NULL),
+	start_routine(NULL),
+	arg(NULL),
+	stack(NULL),
+	user_thread(NULL),
+	id(tid),
+	state(THREAD_READY), /* Thread is always ready? */
+	wait_list(),
+	last_action_val(0),
+	model_thread(true)
+{
+	/* No stack or start routine is allocated; the (unused) context is
+	 * simply zeroed */
+	memset(&context, 0, sizeof(context));
+}
+
/**
* Construct a new thread.
* @param t The thread identifier of the newly created thread.
user_thread(t),
state(THREAD_CREATED),
wait_list(),
- last_action_val(VALUE_NONE)
+ last_action_val(VALUE_NONE),
+ model_thread(false)
{
int ret;
/** @brief A Thread is created for each user-space thread */
class Thread {
public:
+ Thread(thread_id_t tid);
Thread(thrd_t *t, void (*func)(void *), void *a);
~Thread();
void complete();
return ret;
}
+ bool is_model_thread() { return model_thread; }
+
friend void thread_startup();
SNAPSHOTALLOC
* @see Thread::get_return_value()
*/
uint64_t last_action_val;
+
+ /** @brief Is this Thread a special model-checker thread? */
+ const bool model_thread;
};
Thread * thread_current();