--- /dev/null
+indent_with_tabs = 2
+indent_cmt_with_tabs = True
+indent_columns = 2
+indent_class = True
+output_tab_size = 2
+nl_func_leave_one_liners = True
+sp_after_semi = Ignore
+sp_after_semi_for = Remove
+sp_before_semi_for = Ignore
+sp_special_semi = ignore
+sp_before_semi = ignore
+sp_before_semi_for_empty = ignore
+sp_after_semi_for_empty = ignore
+sp_before_tr_emb_cmt = force
+sp_num_before_tr_emb_cmt = 2
+indent_relative_single_line_comments = true
\ No newline at end of file
OBJECTS := libthreads.o schedule.o model.o threads.o librace.o action.o \
nodestack.o clockvector.o main.o snapshot-interface.o cyclegraph.o \
datarace.o impatomic.o cmodelint.o \
- snapshot.o malloc.o mymemory.o common.o mutex.o promise.o conditionvariable.o \
- context.o execution.o libannotate.o pthread.o futex.o
+ snapshot.o malloc.o mymemory.o common.o mutex.o conditionvariable.o \
+ context.o execution.o libannotate.o plugins.o pthread.o futex.o fuzzer.o
CPPFLAGS += -Iinclude -I.
LDFLAGS := -ldl -lrt -rdynamic
# A 1-inch margin PDF generated by 'pandoc'
%.pdf: %.md
pandoc -o $@ $< -V header-includes='\usepackage[margin=1in]{geometry}'
+
+tabbing:
+ uncrustify -c C.cfg --no-backup --replace *.cc
+ uncrustify -c C.cfg --no-backup --replace *.h
* (default), then a Thread is assigned according to the scheduler.
*/
ModelAction::ModelAction(action_type_t type, memory_order order, void *loc,
- uint64_t value, Thread *thread) :
+ uint64_t value, Thread *thread) :
+ location(loc),
+ reads_from(NULL),
+ last_fence_release(NULL),
+ node(NULL),
+ cv(NULL),
+ value(value),
type(type),
order(order),
original_order(order),
+ seq_number(ACTION_INITIAL_CLOCK)
+{
+ /* References to NULL atomic variables can end up here */
+ ASSERT(loc || type == ATOMIC_FENCE);
+
+ Thread *t = thread ? thread : thread_current();
+ this->tid = t->get_id();
+}
+
+
+/**
+ * @brief Construct a new ModelAction
+ *
+ * @param type The type of action
+ * @param order The memory order of this action. A "don't care" for non-ATOMIC
+ * actions (e.g., THREAD_* or MODEL_* actions).
+ * @param loc The location that this action acts upon
+ * @param value (optional) A value associated with the action (e.g., the value
+ * read or written). Defaults to a given macro constant, for debugging purposes.
+ * @param size (optional) The size (in bytes) associated with this action,
+ * stored in this ModelAction and retrievable later via getSize().
+ */
+ModelAction::ModelAction(action_type_t type, memory_order order, void *loc,
+ uint64_t value, int size) :
location(loc),
- value(value),
reads_from(NULL),
- reads_from_promise(NULL),
last_fence_release(NULL),
node(NULL),
- seq_number(ACTION_INITIAL_CLOCK),
cv(NULL),
- sleep_flag(false)
+ value(value),
+ type(type),
+ order(order),
+ original_order(order),
+ seq_number(ACTION_INITIAL_CLOCK)
{
/* References to NULL atomic variables can end up here */
- ASSERT(loc || type == ATOMIC_FENCE || type == MODEL_FIXUP_RELSEQ);
-
- Thread *t = thread ? thread : thread_current();
+ ASSERT(loc);
+ this->size = size;
+ Thread *t = thread_current();
this->tid = t->get_id();
}
*/
/*
- if (cv)
- delete cv; */
+ if (cv)
+ delete cv; */
+}
+
+int ModelAction::getSize() const {
+ return size;
}
void ModelAction::copy_from_new(ModelAction *newaction)
return type == THREAD_JOIN || type == PTHREAD_JOIN;
}
-bool ModelAction::is_relseq_fixup() const
-{
- return type == MODEL_FIXUP_RELSEQ;
-}
-
bool ModelAction::is_mutex_op() const
{
return type == ATOMIC_LOCK || type == ATOMIC_TRYLOCK || type == ATOMIC_UNLOCK || type == ATOMIC_WAIT || type == ATOMIC_NOTIFY_ONE || type == ATOMIC_NOTIFY_ALL;
bool ModelAction::is_read() const
{
- return type == ATOMIC_READ || type == ATOMIC_RMWR || type == ATOMIC_RMW;
+ return type == ATOMIC_READ || type == ATOMIC_RMWR || type == ATOMIC_RMWRCAS || type == ATOMIC_RMW;
}
bool ModelAction::is_write() const
bool ModelAction::is_rmwr() const
{
- return type == ATOMIC_RMWR;
+ return type == ATOMIC_RMWR || type == ATOMIC_RMWRCAS;
+}
+
+bool ModelAction::is_rmwrcas() const
+{
+ return type == ATOMIC_RMWRCAS;
}
bool ModelAction::is_rmw() const
ASSERT(is_read());
if (reads_from)
return reads_from->get_write_value();
- else if (reads_from_promise)
- return reads_from_promise->get_value();
- return VALUE_NONE; /* Only for new actions with no reads-from */
+ return VALUE_NONE; // Only for new actions with no reads-from
}
/**
ASSERT(act);
reads_from = act;
- reads_from_promise = NULL;
- if (act->is_uninitialized()) { // WL
+ if (act->is_uninitialized()) { // WL
uint64_t val = *((uint64_t *) location);
ModelAction * act_initialized = (ModelAction *)act;
act_initialized->set_value(val);
// disabled by WL, because LLVM IR is unable to detect atomic init
/* model->assert_bug("May read from uninitialized atomic:\n"
- " action %d, thread %d, location %p (%s, %s)",
- seq_number, id_to_int(tid), location,
- get_type_str(), get_mo_str());
-*/
- }
-}
-
-/**
- * Set this action's read-from promise
- * @param promise The promise to read from
+ " action %d, thread %d, location %p (%s, %s)",
+ seq_number, id_to_int(tid), location,
+ get_type_str(), get_mo_str());
*/
-void ModelAction::set_read_from_promise(Promise *promise)
-{
- ASSERT(is_read());
- reads_from_promise = promise;
- reads_from = NULL;
+ }
}
/**
const char * ModelAction::get_type_str() const
{
switch (this->type) {
- case MODEL_FIXUP_RELSEQ: return "relseq fixup";
- case THREAD_CREATE: return "thread create";
- case THREAD_START: return "thread start";
- case THREAD_YIELD: return "thread yield";
- case THREAD_JOIN: return "thread join";
- case THREAD_FINISH: return "thread finish";
-
- case PTHREAD_CREATE: return "pthread create";
- case PTHREAD_JOIN: return "pthread join";
-
- case ATOMIC_UNINIT: return "uninitialized";
- case ATOMIC_READ: return "atomic read";
- case ATOMIC_WRITE: return "atomic write";
- case ATOMIC_RMW: return "atomic rmw";
- case ATOMIC_FENCE: return "fence";
- case ATOMIC_RMWR: return "atomic rmwr";
- case ATOMIC_RMWC: return "atomic rmwc";
- case ATOMIC_INIT: return "init atomic";
- case ATOMIC_LOCK: return "lock";
- case ATOMIC_UNLOCK: return "unlock";
- case ATOMIC_TRYLOCK: return "trylock";
- case ATOMIC_WAIT: return "wait";
- case ATOMIC_NOTIFY_ONE: return "notify one";
- case ATOMIC_NOTIFY_ALL: return "notify all";
- case ATOMIC_ANNOTATION: return "annotation";
- default: return "unknown type";
+ case THREAD_CREATE: return "thread create";
+ case THREAD_START: return "thread start";
+ case THREAD_YIELD: return "thread yield";
+ case THREAD_JOIN: return "thread join";
+ case THREAD_FINISH: return "thread finish";
+
+ case PTHREAD_CREATE: return "pthread create";
+ case PTHREAD_JOIN: return "pthread join";
+
+ case ATOMIC_UNINIT: return "uninitialized";
+ case ATOMIC_READ: return "atomic read";
+ case ATOMIC_WRITE: return "atomic write";
+ case ATOMIC_RMW: return "atomic rmw";
+ case ATOMIC_FENCE: return "fence";
+ case ATOMIC_RMWR: return "atomic rmwr";
+ case ATOMIC_RMWRCAS: return "atomic rmwrcas";
+ case ATOMIC_RMWC: return "atomic rmwc";
+ case ATOMIC_INIT: return "init atomic";
+ case ATOMIC_LOCK: return "lock";
+ case ATOMIC_UNLOCK: return "unlock";
+ case ATOMIC_TRYLOCK: return "trylock";
+ case ATOMIC_WAIT: return "wait";
+ case ATOMIC_NOTIFY_ONE: return "notify one";
+ case ATOMIC_NOTIFY_ALL: return "notify all";
+ case ATOMIC_ANNOTATION: return "annotation";
+ default: return "unknown type";
};
}
const char * ModelAction::get_mo_str() const
{
switch (this->order) {
- case std::memory_order_relaxed: return "relaxed";
- case std::memory_order_acquire: return "acquire";
- case std::memory_order_release: return "release";
- case std::memory_order_acq_rel: return "acq_rel";
- case std::memory_order_seq_cst: return "seq_cst";
- default: return "unknown";
+ case std::memory_order_relaxed: return "relaxed";
+ case std::memory_order_acquire: return "acquire";
+ case std::memory_order_release: return "release";
+ case std::memory_order_acq_rel: return "acq_rel";
+ case std::memory_order_seq_cst: return "seq_cst";
+ default: return "unknown";
}
}
const char *type_str = get_type_str(), *mo_str = get_mo_str();
model_print("%-4d %-2d %-14s %7s %14p %-#18" PRIx64,
- seq_number, id_to_int(tid), type_str, mo_str, location, get_return_value());
+ seq_number, id_to_int(tid), type_str, mo_str, location, get_return_value());
if (is_read()) {
if (reads_from)
model_print(" %-3d", reads_from->get_seq_number());
- else if (reads_from_promise) {
- int idx = reads_from_promise->get_index();
- if (idx >= 0)
- model_print(" P%-2d", idx);
- else
- model_print(" P? ");
- } else
+ else
model_print(" ? ");
}
if (cv) {
hash ^= id_to_int(tid) << 6;
if (is_read()) {
- if (reads_from)
- hash ^= reads_from->get_seq_number();
- else if (reads_from_promise)
- hash ^= reads_from_promise->get_index();
- hash ^= get_reads_from_value();
+ if (reads_from)
+ hash ^= reads_from->get_seq_number();
+ hash ^= get_reads_from_value();
}
return hash;
}
-/**
- * @brief Checks the NodeStack to see if a ModelAction is in our may-read-from set
- * @param write The ModelAction to check for
- * @return True if the ModelAction is found; false otherwise
- */
-bool ModelAction::may_read_from(const ModelAction *write) const
-{
- for (int i = 0; i < node->get_read_from_past_size(); i++)
- if (node->get_read_from_past(i) == write)
- return true;
- return false;
-}
-
-/**
- * @brief Checks the NodeStack to see if a Promise is in our may-read-from set
- * @param promise The Promise to check for
- * @return True if the Promise is found; false otherwise
- */
-bool ModelAction::may_read_from(const Promise *promise) const
-{
- for (int i = 0; i < node->get_read_from_promise_size(); i++)
- if (node->get_read_from_promise(i) == promise)
- return true;
- return false;
-}
-
/**
* Only valid for LOCK, TRY_LOCK, UNLOCK, and WAIT operations.
* @return The mutex operated on by this action, if any; otherwise NULL
#include "mymemory.h"
#include "memoryorder.h"
#include "modeltypes.h"
-#include "pthread.h"
-
-/* Forward declarations */
-class ClockVector;
-class Thread;
-class Promise;
+#include "mypthread.h"
+#include "classlist.h"
namespace cdsc {
- class mutex;
+class mutex;
}
using std::memory_order;
/** @brief Represents an action type, identifying one of several types of
* ModelAction */
typedef enum action_type {
- MODEL_FIXUP_RELSEQ, /**< Special ModelAction: finalize a release
- * sequence */
- THREAD_CREATE, /**< A thread creation action */
- THREAD_START, /**< First action in each thread */
- THREAD_YIELD, /**< A thread yield action */
- THREAD_JOIN, /**< A thread join action */
- THREAD_FINISH, /**< A thread completion action */
- PTHREAD_CREATE, /**< A pthread creation action */
- PTHREAD_JOIN, /**< A pthread join action */
-
- ATOMIC_UNINIT, /**< Represents an uninitialized atomic */
- ATOMIC_READ, /**< An atomic read action */
- ATOMIC_WRITE, /**< An atomic write action */
- ATOMIC_RMWR, /**< The read part of an atomic RMW action */
- ATOMIC_RMW, /**< The write part of an atomic RMW action */
- ATOMIC_RMWC, /**< Convert an atomic RMW action into a READ */
- ATOMIC_INIT, /**< Initialization of an atomic object (e.g.,
- * atomic_init()) */
- ATOMIC_FENCE, /**< A fence action */
- ATOMIC_LOCK, /**< A lock action */
- ATOMIC_TRYLOCK, /**< A trylock action */
- ATOMIC_UNLOCK, /**< An unlock action */
- ATOMIC_NOTIFY_ONE, /**< A notify_one action */
- ATOMIC_NOTIFY_ALL, /**< A notify all action */
- ATOMIC_WAIT, /**< A wait action */
- ATOMIC_ANNOTATION, /**< An annotation action to pass information
- to a trace analysis */
+ THREAD_CREATE, // < A thread creation action
+ THREAD_START, // < First action in each thread
+ THREAD_YIELD, // < A thread yield action
+ THREAD_JOIN, // < A thread join action
+ THREAD_FINISH, // < A thread completion action
+ PTHREAD_CREATE, // < A pthread creation action
+ PTHREAD_JOIN, // < A pthread join action
+ ATOMIC_UNINIT, // < Represents an uninitialized atomic
+ ATOMIC_READ, // < An atomic read action
+ ATOMIC_WRITE, // < An atomic write action
+ ATOMIC_RMWR, // < The read part of an atomic RMW action
+	ATOMIC_RMWRCAS,	// < The read part of an atomic RMW CAS action
+ ATOMIC_RMW, // < The write part of an atomic RMW action
+ ATOMIC_RMWC, // < Convert an atomic RMW action into a READ
+ ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
+ ATOMIC_FENCE, // < A fence action
+ ATOMIC_LOCK, // < A lock action
+ ATOMIC_TRYLOCK, // < A trylock action
+ ATOMIC_UNLOCK, // < An unlock action
+ ATOMIC_NOTIFY_ONE, // < A notify_one action
+ ATOMIC_NOTIFY_ALL, // < A notify all action
+ ATOMIC_WAIT, // < A wait action
+ ATOMIC_ANNOTATION, // < An annotation action to pass information to a trace analysis
NOOP
} action_type_t;
-/* Forward declaration */
-class Node;
-class ClockVector;
/**
* @brief Represents a single atomic action
class ModelAction {
public:
ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE, Thread *thread = NULL);
+ ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value, int size);
~ModelAction();
void print() const;
uint64_t get_write_value() const;
uint64_t get_return_value() const;
const ModelAction * get_reads_from() const { return reads_from; }
- Promise * get_reads_from_promise() const { return reads_from_promise; }
cdsc::mutex * get_mutex() const;
Node * get_node() const;
void set_node(Node *n) { node = n; }
void set_read_from(const ModelAction *act);
- void set_read_from_promise(Promise *promise);
/** Store the most recent fence-release from the same thread
* @param fence The fence-release that occured prior to this */
void set_try_lock(bool obtainedlock);
bool is_thread_start() const;
bool is_thread_join() const;
- bool is_relseq_fixup() const;
bool is_mutex_op() const;
bool is_lock() const;
bool is_trylock() const;
bool is_yield() const;
bool could_be_write() const;
bool is_rmwr() const;
+ bool is_rmwrcas() const;
bool is_rmwc() const;
bool is_rmw() const;
bool is_fence() const;
bool same_thread(const ModelAction *act) const;
bool is_conflicting_lock(const ModelAction *act) const;
bool could_synchronize_with(const ModelAction *act) const;
-
+ int getSize() const;
Thread * get_thread_operand() const;
-
void create_cv(const ModelAction *parent = NULL);
ClockVector * get_cv() const { return cv; }
bool synchronize_with(const ModelAction *act);
void process_rmw(ModelAction * act);
void copy_typeandorder(ModelAction * act);
-
- void set_sleep_flag() { sleep_flag=true; }
- bool get_sleep_flag() { return sleep_flag; }
unsigned int hash() const;
-
bool equals(const ModelAction *x) const { return this == x; }
- bool equals(const Promise *x) const { return false; }
-
- bool may_read_from(const ModelAction *write) const;
- bool may_read_from(const Promise *promise) const;
- MEMALLOC
-
void set_value(uint64_t val) { value = val; }
/* to accomodate pthread create and join */
Thread * thread_operand;
- void set_thread_operand(Thread *th) { thread_operand = th; }
+ void set_thread_operand(Thread *th) { thread_operand = th; }
+ MEMALLOC
private:
-
const char * get_type_str() const;
const char * get_mo_str() const;
- /** @brief Type of action (read, write, RMW, fence, thread create, etc.) */
- action_type type;
-
- /** @brief The memory order for this operation. */
- memory_order order;
-
- /** @brief The original memory order parameter for this operation. */
- memory_order original_order;
-
/** @brief A pointer to the memory location for this action. */
void *location;
- /** @brief The thread id that performed this action. */
- thread_id_t tid;
-
- /** @brief The value written (for write or RMW; undefined for read) */
- uint64_t value;
-
- /**
- * @brief The store that this action reads from
- *
- * Only valid for reads
- */
- const ModelAction *reads_from;
-
- /**
- * @brief The promise that this action reads from
- *
- * Only valid for reads
- */
- Promise *reads_from_promise;
+ union {
+ /**
+ * @brief The store that this action reads from
+ *
+ * Only valid for reads
+ */
+ const ModelAction *reads_from;
+ int size;
+ };
/** @brief The last fence release from the same thread */
const ModelAction *last_fence_release;
*/
Node *node;
- /**
- * @brief The sequence number of this action
- *
- * Except for ATOMIC_UNINIT actions, this number should be unique and
- * should represent the action's position in the execution order.
- */
- modelclock_t seq_number;
-
/**
* @brief The clock vector for this operation
*
*/
ClockVector *cv;
- bool sleep_flag;
+ /** @brief The value written (for write or RMW; undefined for read) */
+ uint64_t value;
+
+ /** @brief Type of action (read, write, RMW, fence, thread create, etc.) */
+ action_type type;
+
+ /** @brief The memory order for this operation. */
+ memory_order order;
+
+ /** @brief The original memory order parameter for this operation. */
+ memory_order original_order;
+
+ /** @brief The thread id that performed this action. */
+ thread_id_t tid;
+
+ /**
+ * @brief The sequence number of this action
+ *
+ * Except for ATOMIC_UNINIT actions, this number should be unique and
+ * should represent the action's position in the execution order.
+ */
+ modelclock_t seq_number;
};
-#endif /* __ACTION_H__ */
+#endif /* __ACTION_H__ */
msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
sprintf(msg, fmt, str);
}
- ~bug_message() { if (msg) snapshot_free(msg); }
+ ~bug_message() { if (msg) snapshot_free(msg);}
char *msg;
void print() { model_print("%s", msg); }
SNAPSHOTALLOC
};
-#endif /* __BUGMESSAGE_H__ */
+#endif /* __BUGMESSAGE_H__ */
--- /dev/null
+#ifndef CLASSLIST_H
+#define CLASSLIST_H
+#include "stl-model.h"
+
+class ClockVector;
+class CycleGraph;
+class CycleNode;
+class ModelAction;
+class ModelChecker;
+class ModelExecution;
+class Node;
+class NodeStack;
+class Scheduler;
+class Thread;
+class TraceAnalysis;
+class Fuzzer;
+
+struct model_snapshot_members;
+struct bug_message;
+typedef SnapList<ModelAction *> action_list_t;
+#endif
#include <stdlib.h>
#include "action.h"
+
#include "clockvector.h"
#include "common.h"
#include "threads-model.h"
+
/**
* Constructs a new ClockVector, given a parent ClockVector and a first
* ModelAction. This constructor can assign appropriate default settings if no
* same thread or the parent that created this thread)
* @param act is an action with which to update the ClockVector
*/
-ClockVector::ClockVector(ClockVector *parent, ModelAction *act)
+ClockVector::ClockVector(ClockVector *parent, const ModelAction *act)
{
ASSERT(act);
num_threads = int_to_id(act->get_tid()) + 1;
bool changed = false;
if (cv->num_threads > num_threads) {
clock = (modelclock_t *)snapshot_realloc(clock, cv->num_threads * sizeof(modelclock_t));
- for (int i = num_threads; i < cv->num_threads; i++)
+ for (int i = num_threads;i < cv->num_threads;i++)
clock[i] = 0;
num_threads = cv->num_threads;
}
/* Element-wise maximum */
- for (int i = 0; i < cv->num_threads; i++)
+ for (int i = 0;i < cv->num_threads;i++)
if (cv->clock[i] > clock[i]) {
clock[i] = cv->clock[i];
changed = true;
}
-
+
return changed;
}
{
int i;
model_print("(");
- for (i = 0; i < num_threads; i++)
+ for (i = 0;i < num_threads;i++)
model_print("%2u%s", clock[i], (i == num_threads - 1) ? ")\n" : ", ");
}
#include "mymemory.h"
#include "modeltypes.h"
-
-/* Forward declaration */
-class ModelAction;
+#include "classlist.h"
class ClockVector {
public:
- ClockVector(ClockVector *parent = NULL, ModelAction *act = NULL);
+ ClockVector(ClockVector *parent = NULL, const ModelAction *act = NULL);
~ClockVector();
bool merge(const ClockVector *cv);
bool synchronized_since(const ModelAction *act) const;
int num_threads;
};
-#endif /* __CLOCKVECTOR_H__ */
+#endif /* __CLOCKVECTOR_H__ */
return model->switch_to_master(new ModelAction(ATOMIC_RMWR, ord, obj));
}
+/**
+ * Performs the read part of a RMW CAS action. The next action must
+ * either be the write part of the RMW action or an explicit close out
+ * of the RMW action w/o a write.
+ */
+uint64_t model_rmwrcas_action(void *obj, memory_order ord, uint64_t oldval, int size) {
+ return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, ord, obj, oldval, size));
+}
+
/** Performs the write part of a RMW action. */
void model_rmw_action(void *obj, memory_order ord, uint64_t val) {
model->switch_to_master(new ModelAction(ATOMIC_RMW, ord, obj, val));
}
// cds atomic loads
-uint8_t cds_atomic_load8(void * obj, int atomic_index) {
+uint8_t cds_atomic_load8(void * obj, int atomic_index) {
return (uint8_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint16_t cds_atomic_load16(void * obj, int atomic_index) {
// cds atomic stores
void cds_atomic_store8(void * obj, int atomic_index, uint8_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store16(void * obj, int atomic_index, uint16_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store32(void * obj, int atomic_index, uint32_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store64(void * obj, int atomic_index, uint64_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
}
/*
-#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val ) \
-({ \
- uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
- uint##size##_t _copy = _old; \
- _copy __op__ ( uint##size##_t ) _val; \
- model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
- return _old; \
-})*/
+ #define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val ) \
+ ({ \
+ uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
+ uint##size##_t _copy = _old; \
+ _copy __op__ ( uint##size##_t ) _val; \
+ model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
+ return _old; \
+ })*/
#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val ) \
-({ \
- uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
- uint##size##_t _copy = _old; \
- uint##size##_t _val = val; \
- _copy __op__ _val; \
- model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
- return _old; \
-})
+ ({ \
+ uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index); \
+ uint ## size ## _t _copy = _old; \
+ uint ## size ## _t _val = val; \
+ _copy __op__ _val; \
+ model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
+ return _old; \
+ })
// cds atomic exchange
uint8_t cds_atomic_exchange8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( = , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_exchange16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( = , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_exchange32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( = , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_exchange64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( = , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 64, addr, atomic_index, val);
}
// cds atomic fetch add
uint8_t cds_atomic_fetch_add8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( += , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_add16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( += , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_add32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( += , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_add64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( += , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 64, addr, atomic_index, val);
}
// cds atomic fetch sub
uint8_t cds_atomic_fetch_sub8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( -= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_sub16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( -= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_sub32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( -= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_sub64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( -= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 64, addr, atomic_index, val);
}
// cds atomic fetch and
uint8_t cds_atomic_fetch_and8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( &= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_and16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( &= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_and32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( &= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_and64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( &= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 64, addr, atomic_index, val);
}
// cds atomic fetch or
uint8_t cds_atomic_fetch_or8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( |= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_or16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( |= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_or32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( |= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_or64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( |= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 64, addr, atomic_index, val);
}
// cds atomic fetch xor
uint8_t cds_atomic_fetch_xor8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( ^= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_xor16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( ^= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_xor32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( ^= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_xor64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( ^= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 64, addr, atomic_index, val);
}
// cds atomic compare and exchange
-// In order to accomodate the LLVM PASS, the return values are not true or false.
+// In order to accommodate the LLVM PASS, the return values are not true or false.
#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index) \
-({ \
- uint##size##_t _desired = desired; \
- uint##size##_t _expected = expected; \
- uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
- if (_old == _expected ) { \
- model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired ); return _expected; } \
- else { \
- model_rmwc_action_helper(addr, atomic_index); _expected = _old; return _old; } \
-})
+ ({ \
+ uint ## size ## _t _desired = desired; \
+ uint ## size ## _t _expected = expected; \
+ uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index); \
+ if (_old == _expected ) { \
+ model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired ); return _expected; } \
+ else { \
+ model_rmwc_action_helper(addr, atomic_index); _expected = _old; return _old; } \
+ })
// expected is supposed to be a pointer to an address, but the CmpOperand
-// extracted from LLVM IR is an integer type.
+// extracted from LLVM IR is an integer type.
-uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
- uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
+uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
+ uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
}
uint16_t cds_atomic_compare_exchange16(void* addr, uint16_t expected,
- uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
+ uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
}
-uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
- uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
+uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
+ uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
}
-uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
- uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
+uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
+ uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
}
// cds atomic thread fence
}
/*
-#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
+ #define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
__typeof__(__e__) __q__ = (__e__); \
__typeof__(__m__) __v__ = (__m__); \
else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
__r__; })
-#define _ATOMIC_FENCE_( __x__ ) \
+ #define _ATOMIC_FENCE_( __x__ ) \
({ model_fence_action(__x__);})
-*/
+ */
/*
-#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
+ #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
__typeof__(__m__) __v__ = (__m__); \
__copy__ __o__ __v__; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
__old__ = __old__; Silence clang (-Wunused-value) \
- })
-*/
+ })
+ */
model_print("\nDumping stack trace (%d frames):\n", size);
- for (i = 0; i < size; i++)
+ for (i = 0;i < size;i++)
model_print("\t%s\n", strings[i]);
free(strings);
-#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACKTRACE */
}
void assert_hook(void)
if (!expr) {
char msg[100];
sprintf(msg, "Program has hit assertion in file %s at line %d\n",
- file, line);
+ file, line);
model->assert_user_bug(msg);
}
}
#ifndef CONFIG_DEBUG
-static int fd_user_out; /**< @brief File descriptor from which to read user program output */
+static int fd_user_out; /**< @brief File descriptor from which to read user program output */
/**
* @brief Setup output redirecting
{
fflush(stdout);
char buf[200];
- while (read_to_buf(fd_user_out, buf, sizeof(buf)));
+ while (read_to_buf(fd_user_out, buf, sizeof(buf))) ;
}
/** @brief Print out any pending program output */
model_print("---- END PROGRAM OUTPUT ----\n");
}
-#endif /* ! CONFIG_DEBUG */
+#endif /* ! CONFIG_DEBUG */
extern int model_out;
extern int switch_alloc;
-#define model_print(fmt, ...) do { switch_alloc = 1; dprintf(model_out, fmt, ##__VA_ARGS__); switch_alloc = 0; } while (0)
+#define model_print(fmt, ...) do { switch_alloc = 1; dprintf(model_out, fmt, ## __VA_ARGS__); switch_alloc = 0; } while (0)
#ifdef CONFIG_DEBUG
-#define DEBUG(fmt, ...) do { model_print("*** %15s:%-4d %25s() *** " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__); } while (0)
+#define DEBUG(fmt, ...) do { model_print("*** %15s:%-4d %25s() *** " fmt, __FILE__, __LINE__, __func__, ## __VA_ARGS__); } while (0)
#define DBG() DEBUG("\n")
#define DBG_ENABLED() (1)
#else
#ifdef CONFIG_ASSERT
#define ASSERT(expr) \
-do { \
- if (!(expr)) { \
- fprintf(stderr, "Error: assertion failed in %s at line %d\n", __FILE__, __LINE__); \
- /* print_trace(); // Trace printing may cause dynamic memory allocation */ \
- assert_hook(); \
- exit(EXIT_FAILURE); \
- } \
-} while (0)
+ do { \
+ if (!(expr)) { \
+ fprintf(stderr, "Error: assertion failed in %s at line %d\n", __FILE__, __LINE__); \
+ /* print_trace(); // Trace printing may cause dynamic memory allocation */ \
+ assert_hook(); \
+ exit(EXIT_FAILURE); \
+ } \
+ } while (0)
#else
#define ASSERT(expr) \
do { } while (0)
-#endif /* CONFIG_ASSERT */
+#endif /* CONFIG_ASSERT */
#define error_msg(...) fprintf(stderr, "Error: " __VA_ARGS__)
void print_trace(void);
-#endif /* __COMMON_H__ */
+#endif /* __COMMON_H__ */
namespace cdsc {
condition_variable::condition_variable() {
-
+
}
condition_variable::~condition_variable() {
-
+
}
void condition_variable::notify_one() {
/** Turn on debugging. */
/* #ifndef CONFIG_DEBUG
- #define CONFIG_DEBUG
- #endif
+ #define CONFIG_DEBUG
+ #endif
- #ifndef CONFIG_ASSERT
- #define CONFIG_ASSERT
- #endif
-*/
+ #ifndef CONFIG_ASSERT
+ #define CONFIG_ASSERT
+ #endif
+ */
/** Turn on support for dumping cyclegraphs as dot files at each
* printed summary.*/
#else
#define BIT48 0
#endif
-#endif /* BIT48 */
+#endif /* BIT48 */
/** Snapshotting configurables */
-/**
+/**
* If USE_MPROTECT_SNAPSHOT=2, then snapshot by tuned mmap() algorithm
* If USE_MPROTECT_SNAPSHOT=1, then snapshot by using mmap() and mprotect()
* If USE_MPROTECT_SNAPSHOT=0, then snapshot by using fork() */
return 0;
}
-#endif /* MAC */
+#endif /* MAC */
int model_swapcontext(ucontext_t *oucp, ucontext_t *ucp);
-#else /* !MAC */
+#else /* !MAC */
static inline int model_swapcontext(ucontext_t *oucp, ucontext_t *ucp)
-{
+{
return swapcontext(oucp, ucp);
}
-#endif /* !MAC */
+#endif /* !MAC */
-#endif /* __CONTEXT_H__ */
+#endif /* __CONTEXT_H__ */
#include "cyclegraph.h"
#include "action.h"
#include "common.h"
-#include "promise.h"
#include "threads-model.h"
+#include "clockvector.h"
/** Initializes a CycleGraph object. */
CycleGraph::CycleGraph() :
- discovered(new HashTable<const CycleNode *, const CycleNode *, uintptr_t, 4, model_malloc, model_calloc, model_free>(16)),
- queue(new ModelVector<const CycleNode *>()),
- hasCycles(false),
- oldCycles(false)
+ queue(new SnapVector<const CycleNode *>())
{
}
CycleGraph::~CycleGraph()
{
delete queue;
- delete discovered;
}
/**
#endif
}
-/**
- * Add a CycleNode to the graph, corresponding to a Promise
- * @param promise The Promise that should be added
- * @param node The CycleNode that corresponds to the Promise
- */
-void CycleGraph::putNode(const Promise *promise, CycleNode *node)
-{
- promiseToNode.put(promise, node);
-#if SUPPORT_MOD_ORDER_DUMP
- nodeList.push_back(node);
-#endif
-}
-
-/**
- * @brief Remove the Promise node from the graph
- * @param promise The promise to remove from the graph
- */
-void CycleGraph::erasePromiseNode(const Promise *promise)
-{
- promiseToNode.put(promise, NULL);
-#if SUPPORT_MOD_ORDER_DUMP
- /* Remove the promise node from nodeList */
- CycleNode *node = getNode_noCreate(promise);
- for (unsigned int i = 0; i < nodeList.size(); )
- if (nodeList[i] == node)
- nodeList.erase(nodeList.begin() + i);
- else
- i++;
-#endif
-}
-
/** @return The corresponding CycleNode, if exists; otherwise NULL */
CycleNode * CycleGraph::getNode_noCreate(const ModelAction *act) const
{
return actionToNode.get(act);
}
-/** @return The corresponding CycleNode, if exists; otherwise NULL */
-CycleNode * CycleGraph::getNode_noCreate(const Promise *promise) const
-{
- return promiseToNode.get(promise);
-}
-
/**
* @brief Returns the CycleNode corresponding to a given ModelAction
*
return node;
}
-/**
- * @brief Returns a CycleNode corresponding to a promise
- *
- * Gets (or creates, if none exist) a CycleNode corresponding to a promised
- * value.
- *
- * @param promise The Promise generated by a reader
- * @return The CycleNode corresponding to the Promise
- */
-CycleNode * CycleGraph::getNode(const Promise *promise)
-{
- CycleNode *node = getNode_noCreate(promise);
- if (node == NULL) {
- node = new CycleNode(promise);
- putNode(promise, node);
- }
- return node;
-}
-
-/**
- * Resolve/satisfy a Promise with a particular store ModelAction, taking care
- * of the CycleGraph cleanups, including merging any necessary CycleNodes.
- *
- * @param promise The Promise to resolve
- * @param writer The store that will resolve this Promise
- * @return false if the resolution results in a cycle (or fails in some other
- * way); true otherwise
- */
-bool CycleGraph::resolvePromise(const Promise *promise, ModelAction *writer)
-{
- CycleNode *promise_node = promiseToNode.get(promise);
- CycleNode *w_node = actionToNode.get(writer);
- ASSERT(promise_node);
-
- if (w_node)
- return mergeNodes(w_node, promise_node);
- /* No existing write-node; just convert the promise-node */
- promise_node->resolvePromise(writer);
- erasePromiseNode(promise_node->getPromise());
- putNode(writer, promise_node);
- return true;
-}
-
-/**
- * @brief Merge two CycleNodes that represent the same write
- *
- * Note that this operation cannot be rolled back.
- *
- * @param w_node The write ModelAction node with which to merge
- * @param p_node The Promise node to merge. Will be destroyed after this
- * function.
- *
- * @return false if the merge cannot succeed; true otherwise
- */
-bool CycleGraph::mergeNodes(CycleNode *w_node, CycleNode *p_node)
-{
- ASSERT(!w_node->is_promise());
- ASSERT(p_node->is_promise());
-
- const Promise *promise = p_node->getPromise();
- if (!promise->is_compatible(w_node->getAction()) ||
- !promise->same_value(w_node->getAction()))
- return false;
-
- /* Transfer the RMW */
- CycleNode *promise_rmw = p_node->getRMW();
- if (promise_rmw && promise_rmw != w_node->getRMW() && w_node->setRMW(promise_rmw))
- return false;
-
- /* Transfer back edges to w_node */
- while (p_node->getNumBackEdges() > 0) {
- CycleNode *back = p_node->removeBackEdge();
- if (back == w_node)
- continue;
- addNodeEdge(back, w_node);
- if (hasCycles)
- return false;
- }
-
- /* Transfer forward edges to w_node */
- while (p_node->getNumEdges() > 0) {
- CycleNode *forward = p_node->removeEdge();
- if (forward == w_node)
- continue;
- addNodeEdge(w_node, forward);
- if (hasCycles)
- return false;
- }
-
- erasePromiseNode(promise);
- /* Not deleting p_node, to maintain consistency if mergeNodes() fails */
-
- return !hasCycles;
-}
-
/**
* Adds an edge between two CycleNodes.
* @param fromnode The edge comes from this CycleNode
* @param tonode The edge points to this CycleNode
* @return True, if new edge(s) are added; otherwise false
*/
-bool CycleGraph::addNodeEdge(CycleNode *fromnode, CycleNode *tonode)
+void CycleGraph::addNodeEdge(CycleNode *fromnode, CycleNode *tonode, bool forceedge)
{
- if (fromnode->addEdge(tonode)) {
- rollbackvector.push_back(fromnode);
- if (!hasCycles)
- hasCycles = checkReachable(tonode, fromnode);
- } else
- return false; /* No new edge */
+ //quick check whether edge is redundant
+ if (checkReachable(fromnode, tonode) && !forceedge) {
+ return;
+ }
/*
- * If the fromnode has a rmwnode that is not the tonode, we should
- * follow its RMW chain to add an edge at the end, unless we encounter
- * tonode along the way
+ * If the fromnode has a rmwnode, we should
+ * follow its RMW chain to add an edge at the end.
*/
- CycleNode *rmwnode = fromnode->getRMW();
- if (rmwnode) {
- while (rmwnode != tonode && rmwnode->getRMW())
- rmwnode = rmwnode->getRMW();
-
- if (rmwnode != tonode) {
- if (rmwnode->addEdge(tonode)) {
- if (!hasCycles)
- hasCycles = checkReachable(tonode, rmwnode);
+ while (fromnode->getRMW()) {
+ CycleNode *nextnode = fromnode->getRMW();
+ if (nextnode == tonode)
+ break;
+ fromnode = nextnode;
+ }
- rollbackvector.push_back(rmwnode);
+ fromnode->addEdge(tonode); //Add edge to edgeSrcNode
+
+ /* Propagate clock vector changes */
+ if (tonode->cv->merge(fromnode->cv)) {
+ queue->push_back(fromnode);
+ while(!queue->empty()) {
+ const CycleNode *node = queue->back();
+ queue->pop_back();
+ unsigned int numedges = node->getNumEdges();
+ for(unsigned int i = 0;i < numedges;i++) {
+ CycleNode * enode = node->getEdge(i);
+ if (enode->cv->merge(node->cv))
+ queue->push_back(enode);
}
}
}
- return true;
}
/**
* @brief Add an edge between a write and the RMW which reads from it
*
* Handles special case of a RMW action, where the ModelAction rmw reads from
- * the ModelAction/Promise from. The key differences are:
+ * the ModelAction from. The key differences are:
* -# No write can occur in between the @a rmw and @a from actions.
* -# Only one RMW action can read from a given write.
*
- * @param from The edge comes from this ModelAction/Promise
+ * @param from The edge comes from this ModelAction
* @param rmw The edge points to this ModelAction; this action must read from
- * the ModelAction/Promise from
+ * the ModelAction from
*/
-template <typename T>
-void CycleGraph::addRMWEdge(const T *from, const ModelAction *rmw)
+void CycleGraph::addRMWEdge(const ModelAction *from, const ModelAction *rmw)
{
ASSERT(from);
ASSERT(rmw);
CycleNode *fromnode = getNode(from);
CycleNode *rmwnode = getNode(rmw);
-
/* We assume that this RMW has no RMW reading from it yet */
ASSERT(!rmwnode->getRMW());
- /* Two RMW actions cannot read from the same write. */
- if (fromnode->setRMW(rmwnode))
- hasCycles = true;
- else
- rmwrollbackvector.push_back(fromnode);
+ fromnode->setRMW(rmwnode);
/* Transfer all outgoing edges from the from node to the rmw node */
/* This process should not add a cycle because either:
* (2) the fromnode is the new node and therefore it should not
* have any outgoing edges.
*/
- for (unsigned int i = 0; i < fromnode->getNumEdges(); i++) {
+ for (unsigned int i = 0;i < fromnode->getNumEdges();i++) {
CycleNode *tonode = fromnode->getEdge(i);
if (tonode != rmwnode) {
- if (rmwnode->addEdge(tonode))
- rollbackvector.push_back(rmwnode);
+ rmwnode->addEdge(tonode);
}
}
-
- addNodeEdge(fromnode, rmwnode);
+ fromnode->edges.clear();
+
+ addNodeEdge(fromnode, rmwnode, true);
}
-/* Instantiate two forms of CycleGraph::addRMWEdge */
-template void CycleGraph::addRMWEdge(const ModelAction *from, const ModelAction *rmw);
-template void CycleGraph::addRMWEdge(const Promise *from, const ModelAction *rmw);
/**
* @brief Adds an edge between objects
* @param from The edge comes from this object, of type U
* @return True, if new edge(s) are added; otherwise false
*/
-template <typename T, typename U>
-bool CycleGraph::addEdge(const T *from, const U *to)
+
+void CycleGraph::addEdge(const ModelAction *from, const ModelAction *to)
+{
+ ASSERT(from);
+ ASSERT(to);
+
+ CycleNode *fromnode = getNode(from);
+ CycleNode *tonode = getNode(to);
+
+ addNodeEdge(fromnode, tonode, false);
+}
+
+void CycleGraph::addEdge(const ModelAction *from, const ModelAction *to, bool forceedge)
{
ASSERT(from);
ASSERT(to);
CycleNode *fromnode = getNode(from);
CycleNode *tonode = getNode(to);
- return addNodeEdge(fromnode, tonode);
+ addNodeEdge(fromnode, tonode, forceedge);
}
-/* Instantiate four forms of CycleGraph::addEdge */
-template bool CycleGraph::addEdge(const ModelAction *from, const ModelAction *to);
-template bool CycleGraph::addEdge(const ModelAction *from, const Promise *to);
-template bool CycleGraph::addEdge(const Promise *from, const ModelAction *to);
-template bool CycleGraph::addEdge(const Promise *from, const Promise *to);
#if SUPPORT_MOD_ORDER_DUMP
static void print_node(FILE *file, const CycleNode *node, int label)
{
- if (node->is_promise()) {
- const Promise *promise = node->getPromise();
- int idx = promise->get_index();
- fprintf(file, "P%u", idx);
- if (label) {
- int first = 1;
- fprintf(file, " [label=\"P%d, T", idx);
- for (unsigned int i = 0 ; i < promise->max_available_thread_idx(); i++)
- if (promise->thread_is_available(int_to_id(i))) {
- fprintf(file, "%s%u", first ? "": ",", i);
- first = 0;
- }
- fprintf(file, "\"]");
- }
- } else {
- const ModelAction *act = node->getAction();
- modelclock_t idx = act->get_seq_number();
- fprintf(file, "N%u", idx);
- if (label)
- fprintf(file, " [label=\"N%u, T%u\"]", idx, act->get_tid());
- }
+ const ModelAction *act = node->getAction();
+ modelclock_t idx = act->get_seq_number();
+ fprintf(file, "N%u", idx);
+ if (label)
+ fprintf(file, " [label=\"N%u, T%u\"]", idx, act->get_tid());
}
static void print_edge(FILE *file, const CycleNode *from, const CycleNode *to, const char *prop)
print_node(file, getNode(act), 1);
}
-template <typename T, typename U>
-void CycleGraph::dot_print_edge(FILE *file, const T *from, const U *to, const char *prop)
+void CycleGraph::dot_print_edge(FILE *file, const ModelAction *from, const ModelAction *to, const char *prop)
{
CycleNode *fromnode = getNode(from);
CycleNode *tonode = getNode(to);
print_edge(file, fromnode, tonode, prop);
}
-/* Instantiate two forms of CycleGraph::dot_print_edge */
-template void CycleGraph::dot_print_edge(FILE *file, const Promise *from, const ModelAction *to, const char *prop);
-template void CycleGraph::dot_print_edge(FILE *file, const ModelAction *from, const ModelAction *to, const char *prop);
void CycleGraph::dumpNodes(FILE *file) const
{
- for (unsigned int i = 0; i < nodeList.size(); i++) {
+ for (unsigned int i = 0;i < nodeList.size();i++) {
CycleNode *n = nodeList[i];
print_node(file, n, 1);
fprintf(file, ";\n");
if (n->getRMW())
print_edge(file, n, n->getRMW(), "style=dotted");
- for (unsigned int j = 0; j < n->getNumEdges(); j++)
+ for (unsigned int j = 0;j < n->getNumEdges();j++)
print_edge(file, n, n->getEdge(j), NULL);
}
}
*/
bool CycleGraph::checkReachable(const CycleNode *from, const CycleNode *to) const
{
- discovered->reset();
- queue->clear();
- queue->push_back(from);
- discovered->put(from, from);
- while (!queue->empty()) {
- const CycleNode *node = queue->back();
- queue->pop_back();
- if (node == to)
- return true;
- for (unsigned int i = 0; i < node->getNumEdges(); i++) {
- CycleNode *next = node->getEdge(i);
- if (!discovered->contains(next)) {
- discovered->put(next, next);
- queue->push_back(next);
- }
- }
- }
- return false;
+ return to->cv->synchronized_since(from->action);
}
/**
- * Checks whether one ModelAction/Promise can reach another ModelAction/Promise
- * @param from The ModelAction or Promise from which to begin exploration
- * @param to The ModelAction or Promise to reach
+ * Checks whether one ModelAction can reach another ModelAction
+ * @param from The ModelAction from which to begin exploration
+ * @param to The ModelAction to reach
* @return True, @a from can reach @a to; otherwise, false
*/
-template <typename T, typename U>
-bool CycleGraph::checkReachable(const T *from, const U *to) const
+bool CycleGraph::checkReachable(const ModelAction *from, const ModelAction *to) const
{
CycleNode *fromnode = getNode_noCreate(from);
CycleNode *tonode = getNode_noCreate(to);
return checkReachable(fromnode, tonode);
}
-/* Instantiate four forms of CycleGraph::checkReachable */
-template bool CycleGraph::checkReachable(const ModelAction *from,
- const ModelAction *to) const;
-template bool CycleGraph::checkReachable(const ModelAction *from,
- const Promise *to) const;
-template bool CycleGraph::checkReachable(const Promise *from,
- const ModelAction *to) const;
-template bool CycleGraph::checkReachable(const Promise *from,
- const Promise *to) const;
-
-/** @return True, if the promise has failed; false otherwise */
-bool CycleGraph::checkPromise(const ModelAction *fromact, Promise *promise) const
-{
- discovered->reset();
- queue->clear();
- CycleNode *from = actionToNode.get(fromact);
-
- queue->push_back(from);
- discovered->put(from, from);
- while (!queue->empty()) {
- const CycleNode *node = queue->back();
- queue->pop_back();
-
- if (node->getPromise() == promise)
- return true;
- if (!node->is_promise())
- promise->eliminate_thread(node->getAction()->get_tid());
-
- for (unsigned int i = 0; i < node->getNumEdges(); i++) {
- CycleNode *next = node->getEdge(i);
- if (!discovered->contains(next)) {
- discovered->put(next, next);
- queue->push_back(next);
- }
- }
- }
- return false;
-}
-
-/** @brief Begin a new sequence of graph additions which can be rolled back */
-void CycleGraph::startChanges()
-{
- ASSERT(rollbackvector.empty());
- ASSERT(rmwrollbackvector.empty());
- ASSERT(oldCycles == hasCycles);
-}
-
-/** Commit changes to the cyclegraph. */
-void CycleGraph::commitChanges()
-{
- rollbackvector.clear();
- rmwrollbackvector.clear();
- oldCycles = hasCycles;
-}
-
-/** Rollback changes to the previous commit. */
-void CycleGraph::rollbackChanges()
-{
- for (unsigned int i = 0; i < rollbackvector.size(); i++)
- rollbackvector[i]->removeEdge();
-
- for (unsigned int i = 0; i < rmwrollbackvector.size(); i++)
- rmwrollbackvector[i]->clearRMW();
-
- hasCycles = oldCycles;
- rollbackvector.clear();
- rmwrollbackvector.clear();
-}
-
-/** @returns whether a CycleGraph contains cycles. */
-bool CycleGraph::checkForCycles() const
-{
- return hasCycles;
-}
/**
* @brief Constructor for a CycleNode
*/
CycleNode::CycleNode(const ModelAction *act) :
action(act),
- promise(NULL),
- hasRMW(NULL)
-{
-}
-
-/**
- * @brief Constructor for a Promise CycleNode
- * @param promise The Promise which was generated
- */
-CycleNode::CycleNode(const Promise *promise) :
- action(NULL),
- promise(promise),
- hasRMW(NULL)
+ hasRMW(NULL),
+ cv(new ClockVector(NULL, act))
{
}
return edges.size();
}
-/**
- * @param i The index of the back edge to return
- * @returns The CycleNode back-edge indexed by i
- */
-CycleNode * CycleNode::getBackEdge(unsigned int i) const
-{
- return back_edges[i];
-}
-
-/** @returns The number of edges entering this CycleNode */
-unsigned int CycleNode::getNumBackEdges() const
-{
- return back_edges.size();
-}
-
-/**
- * @brief Remove an element from a vector
- * @param v The vector
- * @param n The element to remove
- * @return True if the element was found; false otherwise
- */
-template <typename T>
-static bool vector_remove_node(SnapVector<T>& v, const T n)
-{
- for (unsigned int i = 0; i < v.size(); i++) {
- if (v[i] == n) {
- v.erase(v.begin() + i);
- return true;
- }
- }
- return false;
-}
-
/**
* @brief Remove a (forward) edge from this CycleNode
* @return The CycleNode which was popped, if one exists; otherwise NULL
CycleNode *ret = edges.back();
edges.pop_back();
- vector_remove_node(ret->back_edges, this);
- return ret;
-}
-
-/**
- * @brief Remove a (back) edge from this CycleNode
- * @return The CycleNode which was popped, if one exists; otherwise NULL
- */
-CycleNode * CycleNode::removeBackEdge()
-{
- if (back_edges.empty())
- return NULL;
-
- CycleNode *ret = back_edges.back();
- back_edges.pop_back();
- vector_remove_node(ret->edges, this);
return ret;
}
* @param node The node to which we add a directed edge
* @return True if this edge is a new edge; false otherwise
*/
-bool CycleNode::addEdge(CycleNode *node)
+void CycleNode::addEdge(CycleNode *node)
{
- for (unsigned int i = 0; i < edges.size(); i++)
+ for (unsigned int i = 0;i < edges.size();i++)
if (edges[i] == node)
- return false;
+ return;
edges.push_back(node);
- node->back_edges.push_back(this);
- return true;
}
/** @returns the RMW CycleNode that reads from the current CycleNode */
hasRMW = node;
return false;
}
-
-/**
- * Convert a Promise CycleNode into a concrete-valued CycleNode. Should only be
- * used when there's no existing ModelAction CycleNode for this write.
- *
- * @param writer The ModelAction which wrote the future value represented by
- * this CycleNode
- */
-void CycleNode::resolvePromise(const ModelAction *writer)
-{
- ASSERT(is_promise());
- ASSERT(promise->is_compatible(writer));
- action = writer;
- promise = NULL;
- ASSERT(!is_promise());
-}
#include "config.h"
#include "mymemory.h"
#include "stl-model.h"
-
-class Promise;
-class CycleNode;
-class ModelAction;
+#include "classlist.h"
/** @brief A graph of Model Actions for tracking cycles. */
class CycleGraph {
- public:
+public:
CycleGraph();
~CycleGraph();
+ void addEdge(const ModelAction *from, const ModelAction *to);
+ void addEdge(const ModelAction *from, const ModelAction *to, bool forceedge);
+ void addRMWEdge(const ModelAction *from, const ModelAction *rmw);
+ bool checkReachable(const ModelAction *from, const ModelAction *to) const;
- template <typename T, typename U>
- bool addEdge(const T *from, const U *to);
-
- template <typename T>
- void addRMWEdge(const T *from, const ModelAction *rmw);
-
- bool checkForCycles() const;
- bool checkPromise(const ModelAction *from, Promise *p) const;
-
- template <typename T, typename U>
- bool checkReachable(const T *from, const U *to) const;
-
- void startChanges();
- void commitChanges();
- void rollbackChanges();
#if SUPPORT_MOD_ORDER_DUMP
void dumpNodes(FILE *file) const;
void dumpGraphToFile(const char *filename) const;
-
void dot_print_node(FILE *file, const ModelAction *act);
- template <typename T, typename U>
- void dot_print_edge(FILE *file, const T *from, const U *to, const char *prop);
+ void dot_print_edge(FILE *file, const ModelAction *from, const ModelAction *to, const char *prop);
#endif
- bool resolvePromise(const Promise *promise, ModelAction *writer);
-
+ CycleNode * getNode_noCreate(const ModelAction *act) const;
SNAPSHOTALLOC
- private:
- bool addNodeEdge(CycleNode *fromnode, CycleNode *tonode);
+private:
+ void addNodeEdge(CycleNode *fromnode, CycleNode *tonode, bool forceedge);
void putNode(const ModelAction *act, CycleNode *node);
- void putNode(const Promise *promise, CycleNode *node);
- void erasePromiseNode(const Promise *promise);
CycleNode * getNode(const ModelAction *act);
- CycleNode * getNode(const Promise *promise);
- CycleNode * getNode_noCreate(const ModelAction *act) const;
- CycleNode * getNode_noCreate(const Promise *promise) const;
- bool mergeNodes(CycleNode *node1, CycleNode *node2);
-
- HashTable<const CycleNode *, const CycleNode *, uintptr_t, 4, model_malloc, model_calloc, model_free> *discovered;
- ModelVector<const CycleNode *> * queue;
-
/** @brief A table for mapping ModelActions to CycleNodes */
HashTable<const ModelAction *, CycleNode *, uintptr_t, 4> actionToNode;
- /** @brief A table for mapping Promises to CycleNodes */
- HashTable<const Promise *, CycleNode *, uintptr_t, 4> promiseToNode;
+ SnapVector<const CycleNode *> * queue;
#if SUPPORT_MOD_ORDER_DUMP
SnapVector<CycleNode *> nodeList;
#endif
bool checkReachable(const CycleNode *from, const CycleNode *to) const;
-
- /** @brief A flag: true if this graph contains cycles */
- bool hasCycles;
- /** @brief The previous value of CycleGraph::hasCycles, for rollback */
- bool oldCycles;
-
- SnapVector<CycleNode *> rollbackvector;
- SnapVector<CycleNode *> rmwrollbackvector;
};
/**
- * @brief A node within a CycleGraph; corresponds either to one ModelAction or
- * to a promised future value
+ * @brief A node within a CycleGraph; corresponds to one ModelAction
*/
class CycleNode {
- public:
+public:
CycleNode(const ModelAction *act);
- CycleNode(const Promise *promise);
- bool addEdge(CycleNode *node);
+ void addEdge(CycleNode *node);
CycleNode * getEdge(unsigned int i) const;
unsigned int getNumEdges() const;
- CycleNode * getBackEdge(unsigned int i) const;
- unsigned int getNumBackEdges() const;
CycleNode * removeEdge();
- CycleNode * removeBackEdge();
-
bool setRMW(CycleNode *);
CycleNode * getRMW() const;
void clearRMW() { hasRMW = NULL; }
const ModelAction * getAction() const { return action; }
- const Promise * getPromise() const { return promise; }
- bool is_promise() const { return !action; }
- void resolvePromise(const ModelAction *writer);
SNAPSHOTALLOC
- private:
+private:
/** @brief The ModelAction that this node represents */
const ModelAction *action;
- /** @brief The promise represented by this node; only valid when action
- * is NULL */
- const Promise *promise;
-
/** @brief The edges leading out from this node */
SnapVector<CycleNode *> edges;
- /** @brief The edges leading into this node */
- SnapVector<CycleNode *> back_edges;
-
/** Pointer to a RMW node that reads from this node, or NULL, if none
* exists */
CycleNode *hasRMW;
+
+ /** ClockVector for this Node. */
+ ClockVector *cv;
+ friend class CycleGraph;
};
-#endif /* __CYCLEGRAPH_H__ */
+#endif /* __CYCLEGRAPH_H__ */
* @return true if the current clock allows a race with the event at clock2/tid2
*/
static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
- modelclock_t clock2, thread_id_t tid2)
+ modelclock_t clock2, thread_id_t tid2)
{
return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
}
if (get_execution()->isfeasibleprefix()) {
bool race_asserted = false;
/* Prune the non-racing unrealized dataraces */
- for (unsigned i = 0; i < unrealizedraces->size(); i++) {
+ for (unsigned i = 0;i < unrealizedraces->size();i++) {
struct DataRace *race = (*unrealizedraces)[i];
if (clock_may_race(race->newaction->get_cv(), race->newaction->get_tid(), race->oldclock, race->oldthread)) {
assert_race(race);
void assert_race(struct DataRace *race)
{
model->assert_bug(
- "Data race detected @ address %p:\n"
- " Access 1: %5s in thread %2d @ clock %3u\n"
- " Access 2: %5s in thread %2d @ clock %3u",
- race->address,
- race->isoldwrite ? "write" : "read",
- id_to_int(race->oldthread),
- race->oldclock,
- race->isnewwrite ? "write" : "read",
- id_to_int(race->newaction->get_tid()),
- race->newaction->get_seq_number()
+ "Data race detected @ address %p:\n"
+ " Access 1: %5s in thread %2d @ clock %3u\n"
+ " Access 2: %5s in thread %2d @ clock %3u",
+ race->address,
+ race->isoldwrite ? "write" : "read",
+ id_to_int(race->oldthread),
+ race->oldclock,
+ race->isnewwrite ? "write" : "read",
+ id_to_int(race->newaction->get_tid()),
+ race->newaction->get_seq_number()
);
}
/* Check for datarace against last read. */
- for (int i = 0; i < record->numReads; i++) {
+ for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that readClock can't actuall be zero here, so it could be
- optimized. */
+ optimized. */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
int copytoindex = 0;
- for (int i = 0; i < record->numReads; i++) {
+ for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that is not really a datarace check as reads cannott
- actually race. It is just determining that this read subsumes
- another in the sense that either this read races or neither
- read races. Note that readClock can't actually be zero, so it
- could be optimized. */
+ actually race. It is just determining that this read subsumes
+ another in the sense that either this read races or neither
+ read races. Note that readClock can't actually be zero, so it
+ could be optimized. */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* Still need this read in vector */
#include "config.h"
#include <stdint.h>
#include "modeltypes.h"
-
-/* Forward declaration */
-class ModelAction;
+#include "classlist.h"
struct ShadowTable {
void * array[65536];
struct DataRace {
/* Clock and thread associated with first action. This won't change in
- response to synchronization. */
+ response to synchronization. */
thread_id_t oldthread;
modelclock_t oldclock;
bool isoldwrite;
/* Model action associated with second action. This could change as
- a result of synchronization. */
+ a result of synchronization. */
ModelAction *newaction;
/* Record whether this is a write, so we can tell the user. */
bool isnewwrite;
#define MAXREADVECTOR (READMASK-1)
#define MAXWRITEVECTOR (WRITEMASK-1)
-#endif /* __DATARACE_H__ */
+#endif /* __DATARACE_H__ */
#include "common.h"
#include "clockvector.h"
#include "cyclegraph.h"
-#include "promise.h"
#include "datarace.h"
#include "threads-model.h"
#include "bugmessage.h"
+#include "fuzzer.h"
-#define INITIAL_THREAD_ID 0
+#define INITIAL_THREAD_ID 0
/**
* Structure for holding small ModelChecker members that should be snapshotted
/* First thread created will have id INITIAL_THREAD_ID */
next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
- next_backtrack(NULL),
bugs(),
- failed_promise(false),
- hard_failed_promise(false),
- too_many_reads(false),
- no_valid_reads(false),
bad_synchronization(false),
- bad_sc_read(false),
asserted(false)
{ }
~model_snapshot_members() {
- for (unsigned int i = 0; i < bugs.size(); i++)
+ for (unsigned int i = 0;i < bugs.size();i++)
delete bugs[i];
bugs.clear();
}
unsigned int next_thread_id;
modelclock_t used_sequence_numbers;
- ModelAction *next_backtrack;
SnapVector<bug_message *> bugs;
- bool failed_promise;
- bool hard_failed_promise;
- bool too_many_reads;
- bool no_valid_reads;
/** @brief Incorrectly-ordered synchronization was made */
bool bad_synchronization;
- bool bad_sc_read;
bool asserted;
SNAPSHOTALLOC
};
/** @brief Constructor */
-ModelExecution::ModelExecution(ModelChecker *m,
- const struct model_params *params,
- Scheduler *scheduler,
- NodeStack *node_stack) :
+ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler, NodeStack *node_stack) :
model(m),
- params(params),
+ params(NULL),
scheduler(scheduler),
action_trace(),
- thread_map(2), /* We'll always need at least 2 threads */
+ thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
pthread_counter(0),
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
mutex_map(),
- promises(),
- futurevalues(),
- pending_rel_seqs(),
thrd_last_action(1),
thrd_last_fence_release(),
node_stack(node_stack),
- priv(new struct model_snapshot_members()),
- mo_graph(new CycleGraph())
+ priv(new struct model_snapshot_members ()),
+ mo_graph(new CycleGraph()),
+ fuzzer(new Fuzzer())
{
/* Initialize a model-checker thread, for special ModelActions */
- model_thread = new Thread(get_next_id()); // L: Create model thread
- add_thread(model_thread); // L: Add model thread to scheduler
+ model_thread = new Thread(get_next_id());
+ add_thread(model_thread);
scheduler->register_engine(this);
node_stack->register_engine(this);
}
/** @brief Destructor */
ModelExecution::~ModelExecution()
{
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
delete get_thread(int_to_id(i));
- for (unsigned int i = 0; i < promises.size(); i++)
- delete promises[i];
-
delete mo_graph;
delete priv;
}
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
{
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
if (scheduler->is_sleep_set(thr)) {
if (should_wake_up(curr, thr))
priv->bad_synchronization = true;
}
-/** @brief Alert the model-checker that an incorrectly-ordered
- * synchronization was made */
-void ModelExecution::set_bad_sc_read()
-{
- priv->bad_sc_read = true;
-}
-
bool ModelExecution::assert_bug(const char *msg)
{
priv->bugs.push_back(new bug_message(msg));
bool ModelExecution::is_deadlocked() const
{
bool blocking_threads = false;
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
if (is_enabled(tid))
return false;
return blocking_threads;
}
-/**
- * @brief Check if we are yield-blocked
- *
- * A program can be "yield-blocked" if all threads are ready to execute a
- * yield.
- *
- * @return True if the program is yield-blocked; false otherwise
- */
-bool ModelExecution::is_yieldblocked() const
-{
- if (!params->yieldblock)
- return false;
-
- for (unsigned int i = 0; i < get_num_threads(); i++) {
- thread_id_t tid = int_to_id(i);
- Thread *t = get_thread(tid);
- if (t->get_pending() && t->get_pending()->is_yield())
- return true;
- }
- return false;
-}
-
/**
* Check if this is a complete execution. That is, have all thread completed
* execution (rather than exiting because sleep sets have forced a redundant
*/
bool ModelExecution::is_complete_execution() const
{
- if (is_yieldblocked())
- return false;
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
if (is_enabled(int_to_id(i)))
return false;
return true;
}
-/**
- * @brief Find the last fence-related backtracking conflict for a ModelAction
- *
- * This function performs the search for the most recent conflicting action
- * against which we should perform backtracking, as affected by fence
- * operations. This includes pairs of potentially-synchronizing actions which
- * occur due to fence-acquire or fence-release, and hence should be explored in
- * the opposite execution order.
- *
- * @param act The current action
- * @return The most recent action which conflicts with act due to fences
- */
-ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
-{
- /* Only perform release/acquire fence backtracking for stores */
- if (!act->is_write())
- return NULL;
-
- /* Find a fence-release (or, act is a release) */
- ModelAction *last_release;
- if (act->is_release())
- last_release = act;
- else
- last_release = get_last_fence_release(act->get_tid());
- if (!last_release)
- return NULL;
-
- /* Skip past the release */
- const action_list_t *list = &action_trace;
- action_list_t::const_reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++)
- if (*rit == last_release)
- break;
- ASSERT(rit != list->rend());
-
- /* Find a prior:
- * load-acquire
- * or
- * load --sb-> fence-acquire */
- ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
- ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
- bool found_acquire_fences = false;
- for ( ; rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->same_thread(prev))
- continue;
-
- int tid = id_to_int(prev->get_tid());
-
- if (prev->is_read() && act->same_var(prev)) {
- if (prev->is_acquire()) {
- /* Found most recent load-acquire, don't need
- * to search for more fences */
- if (!found_acquire_fences)
- return NULL;
- } else {
- prior_loads[tid] = prev;
- }
- }
- if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
- found_acquire_fences = true;
- acquire_fences[tid] = prev;
- }
- }
-
- ModelAction *latest_backtrack = NULL;
- for (unsigned int i = 0; i < acquire_fences.size(); i++)
- if (acquire_fences[i] && prior_loads[i])
- if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
- latest_backtrack = acquire_fences[i];
- return latest_backtrack;
-}
/**
- * @brief Find the last backtracking conflict for a ModelAction
- *
- * This function performs the search for the most recent conflicting action
- * against which we should perform backtracking. This primary includes pairs of
- * synchronizing actions which should be explored in the opposite execution
- * order.
- *
- * @param act The current action
- * @return The most recent action which conflicts with act
- */
-ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
-{
- switch (act->get_type()) {
- case ATOMIC_FENCE:
- /* Only seq-cst fences can (directly) cause backtracking */
- if (!act->is_seqcst())
- break;
- case ATOMIC_READ:
- case ATOMIC_WRITE:
- case ATOMIC_RMW: {
- ModelAction *ret = NULL;
-
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (prev == act)
- continue;
- if (prev->could_synchronize_with(act)) {
- ret = prev;
- break;
- }
- }
-
- ModelAction *ret2 = get_last_fence_conflict(act);
- if (!ret2)
- return ret;
- if (!ret)
- return ret2;
- if (*ret < *ret2)
- return ret2;
- return ret;
- }
- case ATOMIC_LOCK:
- case ATOMIC_TRYLOCK: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->is_conflicting_lock(prev))
- return prev;
- }
- break;
- }
- case ATOMIC_UNLOCK: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_failed_trylock())
- return prev;
- }
- break;
- }
- case ATOMIC_WAIT: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_failed_trylock())
- return prev;
- if (!act->same_thread(prev) && prev->is_notify())
- return prev;
- }
- break;
- }
-
- case ATOMIC_NOTIFY_ALL:
- case ATOMIC_NOTIFY_ONE: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_wait())
- return prev;
- }
- break;
- }
- default:
- break;
- }
- return NULL;
-}
-
-/** This method finds backtracking points where we should try to
- * reorder the parameter ModelAction against.
- *
- * @param the ModelAction to find backtracking points for.
+ * Processes a read model action.
+ * @param curr is the read model action to process.
+ * @param rf_set is the set of model actions we can possibly read from
+ * As a side effect, may add edges to the mo_graph; this function returns void.
*/
-void ModelExecution::set_backtracking(ModelAction *act)
+void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
- Thread *t = get_thread(act);
- ModelAction *prev = get_last_conflict(act);
- if (prev == NULL)
- return;
-
- Node *node = prev->get_node()->get_parent();
-
- /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
- int low_tid, high_tid;
- if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
- low_tid = id_to_int(act->get_tid());
- high_tid = low_tid + 1;
- } else {
- low_tid = 0;
- high_tid = get_num_threads();
- }
+ SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
+ while(true) {
- for (int i = low_tid; i < high_tid; i++) {
- thread_id_t tid = int_to_id(i);
+ int index = fuzzer->selectWrite(curr, rf_set);
+ const ModelAction *rf = (*rf_set)[index];
- /* Make sure this thread can be enabled here. */
- if (i >= node->get_num_threads())
- break;
- /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
- /* Don't backtrack into a point where the thread is disabled or sleeping. */
- if (node->enabled_status(tid) != THREAD_ENABLED)
- continue;
-
- /* Check if this has been explored already */
- if (node->has_been_explored(tid))
- continue;
-
- /* See if fairness allows */
- if (params->fairwindow != 0 && !node->has_priority(tid)) {
- bool unfair = false;
- for (int t = 0; t < node->get_num_threads(); t++) {
- thread_id_t tother = int_to_id(t);
- if (node->is_enabled(tother) && node->has_priority(tother)) {
- unfair = true;
- break;
- }
- }
- if (unfair)
- continue;
- }
-
- /* See if CHESS-like yield fairness allows */
- if (params->yieldon) {
- bool unfair = false;
- for (int t = 0; t < node->get_num_threads(); t++) {
- thread_id_t tother = int_to_id(t);
- if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
- unfair = true;
- break;
- }
- }
- if (unfair)
- continue;
- }
+ ASSERT(rf);
- /* Cache the latest backtracking point */
- set_latest_backtrack(prev);
-
- /* If this is a new backtracking point, mark the tree */
- if (!node->set_backtrack(tid))
- continue;
- DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
- id_to_int(prev->get_tid()),
- id_to_int(t->get_id()));
- if (DBG_ENABLED()) {
- prev->print();
- act->print();
- }
- }
-}
-
-/**
- * @brief Cache the a backtracking point as the "most recent", if eligible
- *
- * Note that this does not prepare the NodeStack for this backtracking
- * operation, it only caches the action on a per-execution basis
- *
- * @param act The operation at which we should explore a different next action
- * (i.e., backtracking point)
- * @return True, if this action is now the most recent backtracking point;
- * false otherwise
- */
-bool ModelExecution::set_latest_backtrack(ModelAction *act)
-{
- if (!priv->next_backtrack || *act > *priv->next_backtrack) {
- priv->next_backtrack = act;
- return true;
- }
- return false;
-}
-
-/**
- * Returns last backtracking point. The model checker will explore a different
- * path for this point in the next execution.
- * @return The ModelAction at which the next execution should diverge.
- */
-ModelAction * ModelExecution::get_next_backtrack()
-{
- ModelAction *next = priv->next_backtrack;
- priv->next_backtrack = NULL;
- return next;
-}
-
-/**
- * Processes a read model action.
- * @param curr is the read model action to process.
- * @return True if processing this read updates the mo_graph.
- */
-bool ModelExecution::process_read(ModelAction *curr)
-{
- Node *node = curr->get_node();
- while (true) {
- bool updated = false;
- switch (node->get_read_from_status()) {
- case READ_FROM_PAST: {
- const ModelAction *rf = node->get_read_from_past();
- ASSERT(rf);
-
- mo_graph->startChanges();
-
- ASSERT(!is_infeasible());
- if (!check_recency(curr, rf)) {
- if (node->increment_read_from()) {
- mo_graph->rollbackChanges();
- continue;
- } else {
- priv->too_many_reads = true;
- }
+ if (r_modification_order(curr, rf, priorset)) {
+ for(unsigned int i=0;i<priorset->size();i++) {
+ mo_graph->addEdge((*priorset)[i], rf);
}
-
- updated = r_modification_order(curr, rf);
read_from(curr, rf);
- mo_graph->commitChanges();
- mo_check_promises(curr, true);
- break;
- }
- case READ_FROM_PROMISE: {
- Promise *promise = curr->get_node()->get_read_from_promise();
- if (promise->add_reader(curr))
- priv->failed_promise = true;
- curr->set_read_from_promise(promise);
- mo_graph->startChanges();
- if (!check_recency(curr, promise))
- priv->too_many_reads = true;
- updated = r_modification_order(curr, promise);
- mo_graph->commitChanges();
- break;
- }
- case READ_FROM_FUTURE: {
- /* Read from future value */
- struct future_value fv = node->get_future_value();
- Promise *promise = new Promise(this, curr, fv);
- curr->set_read_from_promise(promise);
- promises.push_back(promise);
- mo_graph->startChanges();
- updated = r_modification_order(curr, promise);
- mo_graph->commitChanges();
- break;
- }
- default:
- ASSERT(false);
+ get_thread(curr)->set_return_value(curr->get_return_value());
+ delete priorset;
+ return;
}
- get_thread(curr)->set_return_value(curr->get_return_value());
- return updated;
+ priorset->clear();
+ (*rf_set)[index] = rf_set->back();
+ rf_set->pop_back();
}
}
}
get_thread(curr)->set_return_value(1);
}
- //otherwise fall into the lock case
+ //otherwise fall into the lock case
case ATOMIC_LOCK: {
if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
assert_bug("Lock access before initialization");
case ATOMIC_WAIT:
case ATOMIC_UNLOCK: {
/* wake up the other threads */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
state->locked = NULL;
if (!curr->is_wait())
- break; /* The rest is only for ATOMIC_WAIT */
+ break;/* The rest is only for ATOMIC_WAIT */
- /* Should we go to sleep? (simulate spurious failures) */
- if (curr->get_node()->get_misc() == 0) {
- get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
- /* disable us */
- scheduler->sleep(get_thread(curr));
- }
break;
}
case ATOMIC_NOTIFY_ALL: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
- for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
+ for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
scheduler->wake(get_thread(*rit));
}
waiters->clear();
}
case ATOMIC_NOTIFY_ONE: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
- int wakeupthread = curr->get_node()->get_misc();
- action_list_t::iterator it = waiters->begin();
-
- // WL
- if (it == waiters->end())
- break;
-
- advance(it, wakeupthread);
- scheduler->wake(get_thread(*it));
- waiters->erase(it);
+ Thread * thread = fuzzer->selectNotify(waiters);
+ scheduler->wake(thread);
break;
}
return false;
}
-/**
- * @brief Check if the current pending promises allow a future value to be sent
- *
- * It is unsafe to pass a future value back if there exists a pending promise Pr
- * such that:
- *
- * reader --exec-> Pr --exec-> writer
- *
- * If such Pr exists, we must save the pending future value until Pr is
- * resolved.
- *
- * @param writer The operation which sends the future value. Must be a write.
- * @param reader The operation which will observe the value. Must be a read.
- * @return True if the future value can be sent now; false if it must wait.
- */
-bool ModelExecution::promises_may_allow(const ModelAction *writer,
- const ModelAction *reader) const
-{
- for (int i = promises.size() - 1; i >= 0; i--) {
- ModelAction *pr = promises[i]->get_reader(0);
- //reader is after promise...doesn't cross any promise
- if (*reader > *pr)
- return true;
- //writer is after promise, reader before...bad...
- if (*writer > *pr)
- return false;
- }
- return true;
-}
-
-/**
- * @brief Add a future value to a reader
- *
- * This function performs a few additional checks to ensure that the future
- * value can be feasibly observed by the reader
- *
- * @param writer The operation whose value is sent. Must be a write.
- * @param reader The read operation which may read the future value. Must be a read.
- */
-void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
-{
- /* Do more ambitious checks now that mo is more complete */
- if (!mo_may_allow(writer, reader))
- return;
-
- Node *node = reader->get_node();
-
- /* Find an ancestor thread which exists at the time of the reader */
- Thread *write_thread = get_thread(writer);
- while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
- write_thread = write_thread->get_parent();
-
- struct future_value fv = {
- writer->get_write_value(),
- writer->get_seq_number() + params->maxfuturedelay,
- write_thread->get_id(),
- };
- if (node->add_future_value(fv))
- set_latest_backtrack(reader);
-}
-
/**
* Process a write ModelAction
* @param curr The ModelAction to process
- * @param work The work queue, for adding fixup work
* @return True if the mo_graph was updated or promises were resolved
*/
-bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
+void ModelExecution::process_write(ModelAction *curr)
{
- /* Readers to which we may send our future value */
- ModelVector<ModelAction *> send_fv;
-
- const ModelAction *earliest_promise_reader;
- bool updated_promises = false;
-
- bool updated_mod_order = w_modification_order(curr, &send_fv);
- Promise *promise = pop_promise_to_resolve(curr);
-
- if (promise) {
- earliest_promise_reader = promise->get_reader(0);
- updated_promises = resolve_promise(curr, promise, work);
- } else
- earliest_promise_reader = NULL;
-
- for (unsigned int i = 0; i < send_fv.size(); i++) {
- ModelAction *read = send_fv[i];
-
- /* Don't send future values to reads after the Promise we resolve */
- if (!earliest_promise_reader || *read < *earliest_promise_reader) {
- /* Check if future value can be sent immediately */
- if (promises_may_allow(curr, read)) {
- add_future_value(curr, read);
- } else {
- futurevalues.push_back(PendingFutureValue(curr, read));
- }
- }
- }
- /* Check the pending future values */
- for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
- struct PendingFutureValue pfv = futurevalues[i];
- if (promises_may_allow(pfv.writer, pfv.reader)) {
- add_future_value(pfv.writer, pfv.reader);
- futurevalues.erase(futurevalues.begin() + i);
- }
- }
+ w_modification_order(curr);
- mo_graph->commitChanges();
- mo_check_promises(curr, false);
get_thread(curr)->set_return_value(VALUE_NONE);
- return updated_mod_order || updated_promises;
}
/**
action_list_t *list = &action_trace;
action_list_t::reverse_iterator rit;
/* Find X : is_read(X) && X --sb-> curr */
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr)
continue;
/* Establish hypothetical release sequences */
rel_heads_list_t release_heads;
get_release_seq_heads(curr, act, &release_heads);
- for (unsigned int i = 0; i < release_heads.size(); i++)
+ for (unsigned int i = 0;i < release_heads.size();i++)
synchronize(release_heads[i], curr);
if (release_heads.size() != 0)
updated = true;
curr->set_thread_operand(th);
add_thread(th);
th->set_creation(curr);
- /* Promises can be satisfied by children */
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->thread_is_available(curr->get_tid()))
- promise->add_thread(th->get_id());
- }
break;
}
case PTHREAD_CREATE: {
- (*(pthread_t *)curr->get_location()) = pthread_counter++;
+ (*(uint32_t *)curr->get_location()) = pthread_counter++;
struct pthread_params *params = (struct pthread_params *)curr->get_value();
Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
add_thread(th);
th->set_creation(curr);
- if ( pthread_map.size() < pthread_counter )
- pthread_map.resize( pthread_counter );
+ if ( pthread_map.size() < pthread_counter )
+ pthread_map.resize( pthread_counter );
pthread_map[ pthread_counter-1 ] = th;
- /* Promises can be satisfied by children */
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->thread_is_available(curr->get_tid()))
- promise->add_thread(th->get_id());
- }
-
break;
}
case THREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
- break; // WL: to be add (modified)
+ updated = true; /* trigger rel-seq checks */
+		break;	// WL: to be added (modified)
}
case THREAD_FINISH: {
Thread *th = get_thread(curr);
/* Wake up any joining threads */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
if (waiting->waiting_on() == th &&
waiting->get_pending()->is_thread_join())
scheduler->wake(waiting);
}
th->complete();
- /* Completed thread can't satisfy promises */
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->thread_is_available(th->get_id()))
- if (promise->eliminate_thread(th->get_id()))
- priv->failed_promise = true;
- }
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_START: {
- check_promises(curr->get_tid(), NULL, curr->get_cv());
break;
}
default:
return updated;
}
-/**
- * @brief Process the current action for release sequence fixup activity
- *
- * Performs model-checker release sequence fixups for the current action,
- * forcing a single pending release sequence to break (with a given, potential
- * "loose" write) or to complete (i.e., synchronize). If a pending release
- * sequence forms a complete release sequence, then we must perform the fixup
- * synchronization, mo_graph additions, etc.
- *
- * @param curr The current action; must be a release sequence fixup action
- * @param work_queue The work queue to which to add work items as they are
- * generated
- */
-void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
-{
- const ModelAction *write = curr->get_node()->get_relseq_break();
- struct release_seq *sequence = pending_rel_seqs.back();
- pending_rel_seqs.pop_back();
- ASSERT(sequence);
- ModelAction *acquire = sequence->acquire;
- const ModelAction *rf = sequence->rf;
- const ModelAction *release = sequence->release;
- ASSERT(acquire);
- ASSERT(release);
- ASSERT(rf);
- ASSERT(release->same_thread(rf));
-
- if (write == NULL) {
- /**
- * @todo Forcing a synchronization requires that we set
- * modification order constraints. For instance, we can't allow
- * a fixup sequence in which two separate read-acquire
- * operations read from the same sequence, where the first one
- * synchronizes and the other doesn't. Essentially, we can't
- * allow any writes to insert themselves between 'release' and
- * 'rf'
- */
-
- /* Must synchronize */
- if (!synchronize(release, acquire))
- return;
-
- /* Propagate the changed clock vector */
- propagate_clockvector(acquire, work_queue);
- } else {
- /* Break release sequence with new edges:
- * release --mo--> write --mo--> rf */
- mo_graph->addEdge(release, write);
- mo_graph->addEdge(write, rf);
- }
-
- /* See if we have realized a data race */
- checkDataRaces();
-}
-
/**
* Initialize the current action by performing one or more of the following
* actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
newcurr = process_rmw(*curr);
delete *curr;
- if (newcurr->is_rmw())
- compute_promises(newcurr);
-
*curr = newcurr;
return false;
}
(*curr)->set_seq_number(get_next_seq_num());
- newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
+ newcurr = node_stack->explore_action(*curr);
if (newcurr) {
/* First restore type and order in case of RMW operation */
if ((*curr)->is_rmwr())
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
*curr = newcurr;
- return false; /* Action was explored previously */
+ return false; /* Action was explored previously */
} else {
newcurr = *curr;
/* Assign most recent release fence */
newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
- /*
- * Perform one-time actions when pushing new ModelAction onto
- * NodeStack
- */
- if (newcurr->is_write())
- compute_promises(newcurr);
- else if (newcurr->is_relseq_fixup())
- compute_relseq_breakwrites(newcurr);
- else if (newcurr->is_wait())
- newcurr->get_node()->set_misc_max(2);
- else if (newcurr->is_notify_one()) {
- newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
- }
- return true; /* This was a new ModelAction */
+ return true; /* This was a new ModelAction */
}
}
rel_heads_list_t release_heads;
get_release_seq_heads(act, act, &release_heads);
int num_heads = release_heads.size();
- for (unsigned int i = 0; i < release_heads.size(); i++)
+ for (unsigned int i = 0;i < release_heads.size();i++)
if (!synchronize(release_heads[i], act))
num_heads--;
return num_heads > 0;
set_bad_synchronization();
return false;
}
- check_promises(first->get_tid(), second->get_cv(), first->get_cv());
return second->synchronize_with(first);
}
-/**
- * Check promises and eliminate potentially-satisfying threads when a thread is
- * blocked (e.g., join, lock). A thread which is waiting on another thread can
- * no longer satisfy a promise generated from that thread.
- *
- * @param blocker The thread on which a thread is waiting
- * @param waiting The waiting thread
- */
-void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (!promise->thread_is_available(waiting->get_id()))
- continue;
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- ModelAction *reader = promise->get_reader(j);
- if (reader->get_tid() != blocker->get_id())
- continue;
- if (promise->eliminate_thread(waiting->get_id())) {
- /* Promise has failed */
- priv->failed_promise = true;
- } else {
- /* Only eliminate the 'waiting' thread once */
- return;
- }
- }
- }
-}
-
/**
* @brief Check whether a model action is enabled.
*
} else if (curr->is_thread_join()) {
Thread *blocking = curr->get_thread_operand();
if (!blocking->is_complete()) {
- thread_blocking_check_promises(blocking, get_thread(curr));
return false;
}
- } else if (params->yieldblock && curr->is_yield()) {
- return false;
}
return true;
wake_up_sleeping_actions(curr);
- /* Compute fairness information for CHESS yield algorithm */
- if (params->yieldon) {
- curr->get_node()->update_yield(scheduler);
- }
-
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw)
add_action_to_lists(curr);
+ SnapVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
- build_may_read_from(curr);
-
- /* Initialize work_queue with the "current action" work */
- work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
- while (!work_queue.empty() && !has_asserted()) {
- WorkQueueEntry work = work_queue.front();
- work_queue.pop_front();
-
- switch (work.type) {
- case WORK_CHECK_CURR_ACTION: {
- ModelAction *act = work.action;
- bool update = false; /* update this location's release seq's */
- bool update_all = false; /* update all release seq's */
-
- if (process_thread_action(curr))
- update_all = true;
+ rf_set = build_may_read_from(curr);
- if (act->is_read() && !second_part_of_rmw && process_read(act))
- update = true;
+ process_thread_action(curr);
- if (act->is_write() && process_write(act, &work_queue))
- update = true;
+ if (curr->is_read() && !second_part_of_rmw) {
+ process_read(curr, rf_set);
+ delete rf_set;
+ } else {
+ ASSERT(rf_set == NULL);
+ }
- if (act->is_fence() && process_fence(act))
- update_all = true;
+ if (curr->is_write())
+ process_write(curr);
- if (act->is_mutex_op() && process_mutex(act))
- update_all = true;
+ if (curr->is_fence())
+ process_fence(curr);
- if (act->is_relseq_fixup())
- process_relseq_fixup(curr, &work_queue);
+ if (curr->is_mutex_op())
+ process_mutex(curr);
- if (update_all)
- work_queue.push_back(CheckRelSeqWorkEntry(NULL));
- else if (update)
- work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
- break;
- }
- case WORK_CHECK_RELEASE_SEQ:
- resolve_release_sequences(work.location, &work_queue);
- break;
- case WORK_CHECK_MO_EDGES: {
- /** @todo Complete verification of work_queue */
- ModelAction *act = work.action;
- bool updated = false;
-
- if (act->is_read()) {
- const ModelAction *rf = act->get_reads_from();
- const Promise *promise = act->get_reads_from_promise();
- if (rf) {
- if (r_modification_order(act, rf))
- updated = true;
- if (act->is_seqcst()) {
- ModelAction *last_sc_write = get_last_seq_cst_write(act);
- if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
- set_bad_sc_read();
- }
- }
- } else if (promise) {
- if (r_modification_order(act, promise))
- updated = true;
- }
- }
- if (act->is_write()) {
- if (w_modification_order(act, NULL))
- updated = true;
- }
- mo_graph->commitChanges();
-
- if (updated)
- work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
- break;
- }
- default:
- ASSERT(false);
- break;
- }
- }
-
- check_curr_backtracking(curr);
- set_backtracking(curr);
return curr;
}
-void ModelExecution::check_curr_backtracking(ModelAction *curr)
-{
- Node *currnode = curr->get_node();
- Node *parnode = currnode->get_parent();
-
- if ((parnode && !parnode->backtrack_empty()) ||
- !currnode->misc_empty() ||
- !currnode->read_from_empty() ||
- !currnode->promise_empty() ||
- !currnode->relseq_break_empty()) {
- set_latest_backtrack(curr);
- }
-}
-
-bool ModelExecution::promises_expired() const
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->get_expiration() < priv->used_sequence_numbers)
- return true;
- }
- return false;
-}
-
/**
* This is the strongest feasibility check available.
* @return whether the current trace (partial or complete) must be a prefix of
*/
bool ModelExecution::isfeasibleprefix() const
{
- return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
+ return !is_infeasible();
}
/**
{
char buf[100];
char *ptr = buf;
- if (mo_graph->checkForCycles())
- ptr += sprintf(ptr, "[mo cycle]");
- if (priv->failed_promise || priv->hard_failed_promise)
- ptr += sprintf(ptr, "[failed promise]");
- if (priv->too_many_reads)
- ptr += sprintf(ptr, "[too many reads]");
- if (priv->no_valid_reads)
- ptr += sprintf(ptr, "[no valid reads-from]");
if (priv->bad_synchronization)
ptr += sprintf(ptr, "[bad sw ordering]");
- if (priv->bad_sc_read)
- ptr += sprintf(ptr, "[bad sc read]");
- if (promises_expired())
- ptr += sprintf(ptr, "[promise expired]");
- if (promises.size() != 0)
- ptr += sprintf(ptr, "[unresolved promise]");
if (ptr != buf)
model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
}
-/**
- * Returns whether the current completed trace is feasible, except for pending
- * release sequences.
- */
-bool ModelExecution::is_feasible_prefix_ignore_relseq() const
-{
- return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
-
-}
-
/**
* Check if the current partial trace is infeasible. Does not check any
* end-of-execution flags, which might rule out the execution. Thus, this is
*/
bool ModelExecution::is_infeasible() const
{
- return mo_graph->checkForCycles() ||
- priv->no_valid_reads ||
- priv->too_many_reads ||
- priv->bad_synchronization ||
- priv->bad_sc_read ||
- priv->hard_failed_promise ||
- promises_expired();
+ return priv->bad_synchronization;
}
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction *lastread = get_last_action(act->get_tid());
lastread->process_rmw(act);
if (act->is_rmw()) {
- if (lastread->get_reads_from())
- mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
- else
- mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
- mo_graph->commitChanges();
+ mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
}
return lastread;
}
-/**
- * A helper function for ModelExecution::check_recency, to check if the current
- * thread is able to read from a different write/promise for 'params.maxreads'
- * number of steps and if that write/promise should become visible (i.e., is
- * ordered later in the modification order). This helps model memory liveness.
- *
- * @param curr The current action. Must be a read.
- * @param rf The write/promise from which we plan to read
- * @param other_rf The write/promise from which we may read
- * @return True if we were able to read from other_rf for params.maxreads steps
- */
-template <typename T, typename U>
-bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
-{
- /* Need a different write/promise */
- if (other_rf->equals(rf))
- return false;
-
- /* Only look for "newer" writes/promises */
- if (!mo_graph->checkReachable(rf, other_rf))
- return false;
-
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
- action_list_t::reverse_iterator rit = list->rbegin();
- ASSERT((*rit) == curr);
- /* Skip past curr */
- rit++;
-
- /* Does this write/promise work for everyone? */
- for (int i = 0; i < params->maxreads; i++, rit++) {
- ModelAction *act = *rit;
- if (!act->may_read_from(other_rf))
- return false;
- }
- return true;
-}
-
-/**
- * Checks whether a thread has read from the same write or Promise for too many
- * times without seeing the effects of a later write/Promise.
- *
- * Basic idea:
- * 1) there must a different write/promise that we could read from,
- * 2) we must have read from the same write/promise in excess of maxreads times,
- * 3) that other write/promise must have been in the reads_from set for maxreads times, and
- * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
- *
- * If so, we decide that the execution is no longer feasible.
- *
- * @param curr The current action. Must be a read.
- * @param rf The ModelAction/Promise from which we might read.
- * @return True if the read should succeed; false otherwise
- */
-template <typename T>
-bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
-{
- if (!params->maxreads)
- return true;
-
- //NOTE: Next check is just optimization, not really necessary....
- if (curr->get_node()->get_read_from_past_size() +
- curr->get_node()->get_read_from_promise_size() <= 1)
- return true;
-
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- int tid = id_to_int(curr->get_tid());
- ASSERT(tid < (int)thrd_lists->size());
- action_list_t *list = &(*thrd_lists)[tid];
- action_list_t::reverse_iterator rit = list->rbegin();
- ASSERT((*rit) == curr);
- /* Skip past curr */
- rit++;
-
- action_list_t::reverse_iterator ritcopy = rit;
- /* See if we have enough reads from the same value */
- for (int count = 0; count < params->maxreads; ritcopy++, count++) {
- if (ritcopy == list->rend())
- return true;
- ModelAction *act = *ritcopy;
- if (!act->is_read())
- return true;
- if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
- return true;
- if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
- return true;
- if (act->get_node()->get_read_from_past_size() +
- act->get_node()->get_read_from_promise_size() <= 1)
- return true;
- }
- for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
- const ModelAction *write = curr->get_node()->get_read_from_past(i);
- if (should_read_instead(curr, rf, write))
- return false; /* liveness failure */
- }
- for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
- const Promise *promise = curr->get_node()->get_read_from_promise(i);
- if (should_read_instead(curr, rf, promise))
- return false; /* liveness failure */
- }
- return true;
-}
-
/**
* @brief Updates the mo_graph with the constraints imposed from the current
* read.
* @param rf The ModelAction or Promise that curr reads from. Must be a write.
* @return True if modification order edges were added; false otherwise
*/
-template <typename rf_type>
-bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
+
+bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
- bool added = false;
ASSERT(curr->is_read());
/* Last SC fence in the current thread */
last_sc_write = get_last_seq_cst_write(curr);
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i */
ModelAction *last_sc_fence_thread_local = NULL;
if (int_to_id((int)i) != curr->get_tid())
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Skip curr */
/* C++, Section 29.3 statement 5 */
if (curr->is_seqcst() && last_sc_fence_thread_local &&
*act < *last_sc_fence_thread_local) {
- added = mo_graph->addEdge(act, rf) || added;
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
else if (act->is_seqcst() && last_sc_fence_local &&
- *act < *last_sc_fence_local) {
- added = mo_graph->addEdge(act, rf) || added;
+ *act < *last_sc_fence_local) {
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
else if (last_sc_fence_thread_before &&
- *act < *last_sc_fence_thread_before) {
- added = mo_graph->addEdge(act, rf) || added;
+ *act < *last_sc_fence_thread_before) {
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
break;
}
}
*/
if (act->happens_before(curr)) {
if (act->is_write()) {
- added = mo_graph->addEdge(act, rf) || added;
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
} else {
const ModelAction *prevrf = act->get_reads_from();
- const Promise *prevrf_promise = act->get_reads_from_promise();
- if (prevrf) {
- if (!prevrf->equals(rf))
- added = mo_graph->addEdge(prevrf, rf) || added;
- } else if (!prevrf_promise->equals(rf)) {
- added = mo_graph->addEdge(prevrf_promise, rf) || added;
+ if (!prevrf->equals(rf)) {
+ if (mo_graph->checkReachable(rf, prevrf))
+ return false;
+ priorset->push_back(prevrf);
}
}
break;
}
}
}
-
- /*
- * All compatible, thread-exclusive promises must be ordered after any
- * concrete loads from the same thread
- */
- for (unsigned int i = 0; i < promises.size(); i++)
- if (promises[i]->is_compatible_exclusive(curr))
- added = mo_graph->addEdge(rf, promises[i]) || added;
-
- return added;
+ return true;
}
/**
* value. If NULL, then don't record any future values.
* @return True if modification order edges were added; false otherwise
*/
-bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
+void ModelExecution::w_modification_order(ModelAction *curr)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
- bool added = false;
ASSERT(curr->is_write());
if (curr->is_seqcst()) {
/* We have to at least see the last sequentially consistent write,
- so we are initialized. */
+ so we are initialized. */
ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
if (last_seq_cst != NULL) {
- added = mo_graph->addEdge(last_seq_cst, curr) || added;
+ mo_graph->addEdge(last_seq_cst, curr);
}
}
ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ bool force_edge = false;
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr) {
/*
* 3) If normal write, we need to look at earlier actions, so
* continue processing list.
*/
+ force_edge = true;
if (curr->is_rmw()) {
if (curr->get_reads_from() != NULL)
break;
/* C++, Section 29.3 statement 7 */
if (last_sc_fence_thread_before && act->is_write() &&
*act < *last_sc_fence_thread_before) {
- added = mo_graph->addEdge(act, curr) || added;
+ mo_graph->addEdge(act, curr, force_edge);
break;
}
* readfrom(act) --mo--> act
*/
if (act->is_write())
- added = mo_graph->addEdge(act, curr) || added;
+ mo_graph->addEdge(act, curr, force_edge);
else if (act->is_read()) {
//if previous read accessed a null, just keep going
- if (act->get_reads_from() == NULL) {
- added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
- } else
- added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
+ mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
}
break;
} else if (act->is_read() && !act->could_synchronize_with(curr) &&
- !act->same_thread(curr)) {
+ !act->same_thread(curr)) {
/* We have an action that:
(1) did not happen before us
(2) is a read and we are a write
*/
- if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
- if (!is_infeasible())
- send_fv->push_back(act);
- else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
- add_future_value(curr, act);
- }
- }
- }
- }
-
- /*
- * All compatible, thread-exclusive promises must be ordered after any
- * concrete stores to the same thread, or else they can be merged with
- * this store later
- */
- for (unsigned int i = 0; i < promises.size(); i++)
- if (promises[i]->is_compatible_exclusive(curr))
- added = mo_graph->addEdge(curr, promises[i]) || added;
-
- return added;
-}
-
-//This procedure uses cohere to prune future values that are
-//guaranteed to generate a coherence violation.
-//
-//need to see if there is (1) a promise for thread write, (2)
-//the promise is sb before write, (3) the promise can only be
-//resolved by the thread read, and (4) the promise has same
-//location as read/write
-
-bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
- thread_id_t write_tid=write->get_tid();
- for(unsigned int i = promises.size(); i>0; i--) {
- Promise *pr=promises[i-1];
- if (!pr->same_location(write))
- continue;
- //the reading thread is the only thread that can resolve the promise
- if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
- for(unsigned int j=0;j<pr->get_num_readers();j++) {
- ModelAction *prreader=pr->get_reader(j);
- //the writing thread reads from the promise before the write
- if (prreader->get_tid()==write_tid &&
- (*prreader)<(*write)) {
- if ((*read)>(*prreader)) {
- //check that we don't have a read between the read and promise
- //from the same thread as read
- bool okay=false;
- for(const ModelAction *tmp=read;tmp!=prreader;) {
- tmp=tmp->get_node()->get_parent()->get_action();
- if (tmp->is_read() && tmp->same_thread(read)) {
- okay=true;
- break;
- }
- }
- if (okay)
- continue;
- }
- return false;
- }
}
}
}
- return true;
-}
-
-
-/** Arbitrary reads from the future are not allowed. Section 29.3
- * part 9 places some constraints. This method checks one result of constraint
- * constraint. Others require compiler support. */
-bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
-{
- if (!writer->is_rmw())
- return true;
-
- if (!reader->is_rmw())
- return true;
-
- for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
- if (search == reader)
- return false;
- if (search->get_tid() == reader->get_tid() &&
- search->happens_before(reader))
- break;
- }
-
- return true;
}
/**
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
unsigned int i;
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
const ModelAction *write_after_read = NULL;
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Don't disallow due to act == reader */
* @param release_heads A pass-by-reference style return parameter. After
* execution of this function, release_heads will contain the heads of all the
* relevant release sequences, if any exists with certainty
- * @param pending A pass-by-reference style return parameter which is only used
- * when returning false (i.e., uncertain). Returns most information regarding
- * an uncertain release sequence, including any write operations that might
- * break the sequence.
* @return true, if the ModelExecution is certain that release_heads is complete;
* false otherwise
*/
-bool ModelExecution::release_seq_heads(const ModelAction *rf,
- rel_heads_list_t *release_heads,
- struct release_seq *pending) const
+bool ModelExecution::release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const
{
- /* Only check for release sequences if there are no cycles */
- if (mo_graph->checkForCycles())
- return false;
- for ( ; rf != NULL; rf = rf->get_reads_from()) {
+ for ( ;rf != NULL;rf = rf->get_reads_from()) {
ASSERT(rf->is_write());
if (rf->is_release())
else if (rf->get_last_fence_release())
release_heads->push_back(rf->get_last_fence_release());
if (!rf->is_rmw())
- break; /* End of RMW chain */
+ break;/* End of RMW chain */
/** @todo Need to be smarter here... In the linux lock
* example, this will run to the beginning of the program for
/* acq_rel RMW is a sufficient stopping condition */
if (rf->is_acquire() && rf->is_release())
- return true; /* complete */
+ return true;/* complete */
};
- if (!rf) {
- /* read from future: need to settle this later */
- pending->rf = NULL;
- return false; /* incomplete */
- }
+	ASSERT(rf);	// Needs to be a real write
if (rf->is_release())
- return true; /* complete */
+ return true;/* complete */
/* else relaxed write
* - check for fence-release in the same thread (29.8, stmt. 3)
if (fence_release)
release_heads->push_back(fence_release);
- int tid = id_to_int(rf->get_tid());
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
- action_list_t *list = &(*thrd_lists)[tid];
- action_list_t::const_reverse_iterator rit;
-
- /* Find rf in the thread list */
- rit = std::find(list->rbegin(), list->rend(), rf);
- ASSERT(rit != list->rend());
-
- /* Find the last {write,fence}-release */
- for (; rit != list->rend(); rit++) {
- if (fence_release && *(*rit) < *fence_release)
- break;
- if ((*rit)->is_release())
- break;
- }
- if (rit == list->rend()) {
- /* No write-release in this thread */
- return true; /* complete */
- } else if (fence_release && *(*rit) < *fence_release) {
- /* The fence-release is more recent (and so, "stronger") than
- * the most recent write-release */
- return true; /* complete */
- } /* else, need to establish contiguous release sequence */
- ModelAction *release = *rit;
-
- ASSERT(rf->same_thread(release));
-
- pending->writes.clear();
-
- bool certain = true;
- for (unsigned int i = 0; i < thrd_lists->size(); i++) {
- if (id_to_int(rf->get_tid()) == (int)i)
- continue;
- list = &(*thrd_lists)[i];
-
- /* Can we ensure no future writes from this thread may break
- * the release seq? */
- bool future_ordered = false;
-
- ModelAction *last = get_last_action(int_to_id(i));
- Thread *th = get_thread(int_to_id(i));
- if ((last && rf->happens_before(last)) ||
- !is_enabled(th) ||
- th->is_complete())
- future_ordered = true;
-
- ASSERT(!th->is_model_thread() || future_ordered);
-
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- const ModelAction *act = *rit;
- /* Reach synchronization -> this thread is complete */
- if (act->happens_before(release))
- break;
- if (rf->happens_before(act)) {
- future_ordered = true;
- continue;
- }
-
- /* Only non-RMW writes can break release sequences */
- if (!act->is_write() || act->is_rmw())
- continue;
-
- /* Check modification order */
- if (mo_graph->checkReachable(rf, act)) {
- /* rf --mo--> act */
- future_ordered = true;
- continue;
- }
- if (mo_graph->checkReachable(act, release))
- /* act --mo--> release */
- break;
- if (mo_graph->checkReachable(release, act) &&
- mo_graph->checkReachable(act, rf)) {
- /* release --mo-> act --mo--> rf */
- return true; /* complete */
- }
- /* act may break release sequence */
- pending->writes.push_back(act);
- certain = false;
- }
- if (!future_ordered)
- certain = false; /* This thread is uncertain */
- }
-
- if (certain) {
- release_heads->push_back(release);
- pending->writes.clear();
- } else {
- pending->release = release;
- pending->rf = rf;
- }
- return certain;
+ return true; /* complete */
}
/**
* @see ModelExecution::release_seq_heads
*/
void ModelExecution::get_release_seq_heads(ModelAction *acquire,
- ModelAction *read, rel_heads_list_t *release_heads)
+ ModelAction *read, rel_heads_list_t *release_heads)
{
const ModelAction *rf = read->get_reads_from();
- struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
- sequence->acquire = acquire;
- sequence->read = read;
- if (!release_seq_heads(rf, release_heads, sequence)) {
- /* add act to 'lazy checking' list */
- pending_rel_seqs.push_back(sequence);
- } else {
- snapshot_free(sequence);
- }
-}
-
-/**
- * @brief Propagate a modified clock vector to actions later in the execution
- * order
- *
- * After an acquire operation lazily completes a release-sequence
- * synchronization, we must update all clock vectors for operations later than
- * the acquire in the execution order.
- *
- * @param acquire The ModelAction whose clock vector must be propagated
- * @param work The work queue to which we can add work items, if this
- * propagation triggers more updates (e.g., to the modification order)
- */
-void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
-{
- /* Re-check all pending release sequences */
- work->push_back(CheckRelSeqWorkEntry(NULL));
- /* Re-check read-acquire for mo_graph edges */
- work->push_back(MOEdgeWorkEntry(acquire));
-
- /* propagate synchronization to later actions */
- action_list_t::reverse_iterator rit = action_trace.rbegin();
- for (; (*rit) != acquire; rit++) {
- ModelAction *propagate = *rit;
- if (acquire->happens_before(propagate)) {
- synchronize(acquire, propagate);
- /* Re-check 'propagate' for mo_graph edges */
- work->push_back(MOEdgeWorkEntry(propagate));
- }
- }
-}
-
-/**
- * Attempt to resolve all stashed operations that might synchronize with a
- * release sequence for a given location. This implements the "lazy" portion of
- * determining whether or not a release sequence was contiguous, since not all
- * modification order information is present at the time an action occurs.
- *
- * @param location The location/object that should be checked for release
- * sequence resolutions. A NULL value means to check all locations.
- * @param work_queue The work queue to which to add work items as they are
- * generated
- * @return True if any updates occurred (new synchronization, new mo_graph
- * edges)
- */
-bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
-{
- bool updated = false;
- SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
- while (it != pending_rel_seqs.end()) {
- struct release_seq *pending = *it;
- ModelAction *acquire = pending->acquire;
- const ModelAction *read = pending->read;
-
- /* Only resolve sequences on the given location, if provided */
- if (location && read->get_location() != location) {
- it++;
- continue;
- }
-
- const ModelAction *rf = read->get_reads_from();
- rel_heads_list_t release_heads;
- bool complete;
- complete = release_seq_heads(rf, &release_heads, pending);
- for (unsigned int i = 0; i < release_heads.size(); i++)
- if (!acquire->has_synchronized_with(release_heads[i]))
- if (synchronize(release_heads[i], acquire))
- updated = true;
-
- if (updated) {
- /* Propagate the changed clock vector */
- propagate_clockvector(acquire, work_queue);
- }
- if (complete) {
- it = pending_rel_seqs.erase(it);
- snapshot_free(pending);
- } else {
- it++;
- }
- }
-
- // If we resolved promises or data races, see if we have realized a data race.
- checkDataRaces();
-
- return updated;
+ release_seq_heads(rf, release_heads);
}
/**
action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); (*rit) != curr; rit++)
+ for (rit = list->rbegin();(*rit) != curr;rit++)
;
- rit++; /* Skip past curr */
- for ( ; rit != list->rend(); rit++)
+ rit++; /* Skip past curr */
+ for ( ;rit != list->rend();rit++)
if ((*rit)->is_write() && (*rit)->is_seqcst())
return *rit;
return NULL;
action_list_t::reverse_iterator rit = list->rbegin();
if (before_fence) {
- for (; rit != list->rend(); rit++)
+ for (;rit != list->rend();rit++)
if (*rit == before_fence)
break;
rit++;
}
- for (; rit != list->rend(); rit++)
+ for (;rit != list->rend();rit++)
if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
return *rit;
return NULL;
action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++)
+ for (rit = list->rbegin();rit != list->rend();rit++)
if ((*rit)->is_unlock() || (*rit)->is_wait())
return *rit;
return NULL;
return get_parent_action(tid)->get_cv();
}
-/**
- * @brief Find the promise (if any) to resolve for the current action and
- * remove it from the pending promise vector
- * @param curr The current ModelAction. Should be a write.
- * @return The Promise to resolve, if any; otherwise NULL
- */
-Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
-{
- for (unsigned int i = 0; i < promises.size(); i++)
- if (curr->get_node()->get_promise(i)) {
- Promise *ret = promises[i];
- promises.erase(promises.begin() + i);
- return ret;
- }
- return NULL;
-}
-
-/**
- * Resolve a Promise with a current write.
- * @param write The ModelAction that is fulfilling Promises
- * @param promise The Promise to resolve
- * @param work The work queue, for adding new fixup work
- * @return True if the Promise was successfully resolved; false otherwise
- */
-bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
- work_queue_t *work)
-{
- ModelVector<ModelAction *> actions_to_check;
-
- for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
- ModelAction *read = promise->get_reader(i);
- if (read_from(read, write)) {
- /* Propagate the changed clock vector */
- propagate_clockvector(read, work);
- }
- actions_to_check.push_back(read);
- }
- /* Make sure the promise's value matches the write's value */
- ASSERT(promise->is_compatible(write) && promise->same_value(write));
- if (!mo_graph->resolvePromise(promise, write))
- priv->hard_failed_promise = true;
-
- /**
- * @todo It is possible to end up in an inconsistent state, where a
- * "resolved" promise may still be referenced if
- * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
- *
- * Note that the inconsistency only matters when dumping mo_graph to
- * file.
- *
- * delete promise;
- */
-
- //Check whether reading these writes has made threads unable to
- //resolve promises
- for (unsigned int i = 0; i < actions_to_check.size(); i++) {
- ModelAction *read = actions_to_check[i];
- mo_check_promises(read, true);
- }
-
- return true;
-}
-
-/**
- * Compute the set of promises that could potentially be satisfied by this
- * action. Note that the set computation actually appears in the Node, not in
- * ModelExecution.
- * @param curr The ModelAction that may satisfy promises
- */
-void ModelExecution::compute_promises(ModelAction *curr)
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (!promise->is_compatible(curr) || !promise->same_value(curr))
- continue;
-
- bool satisfy = true;
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- const ModelAction *act = promise->get_reader(j);
- if (act->happens_before(curr) ||
- act->could_synchronize_with(curr)) {
- satisfy = false;
- break;
- }
- }
- if (satisfy)
- curr->get_node()->set_promise(i);
- }
-}
-
-/** Checks promises in response to change in ClockVector Threads. */
-void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (!promise->thread_is_available(tid))
- continue;
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- const ModelAction *act = promise->get_reader(j);
- if ((!old_cv || !old_cv->synchronized_since(act)) &&
- merge_cv->synchronized_since(act)) {
- if (promise->eliminate_thread(tid)) {
- /* Promise has failed */
- priv->failed_promise = true;
- return;
- }
- }
- }
- }
-}
-
-void ModelExecution::check_promises_thread_disabled()
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->has_failed()) {
- priv->failed_promise = true;
- return;
- }
- }
-}
-
-/**
- * @brief Checks promises in response to addition to modification order for
- * threads.
- *
- * We test whether threads are still available for satisfying promises after an
- * addition to our modification order constraints. Those that are unavailable
- * are "eliminated". Once all threads are eliminated from satisfying a promise,
- * that promise has failed.
- *
- * @param act The ModelAction which updated the modification order
- * @param is_read_check Should be true if act is a read and we must check for
- * updates to the store from which it read (there is a distinction here for
- * RMW's, which are both a load and a store)
- */
-void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
-{
- const ModelAction *write = is_read_check ? act->get_reads_from() : act;
-
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
-
- // Is this promise on the same location?
- if (!promise->same_location(write))
- continue;
-
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- const ModelAction *pread = promise->get_reader(j);
- if (!pread->happens_before(act))
- continue;
- if (mo_graph->checkPromise(write, promise)) {
- priv->hard_failed_promise = true;
- return;
- }
- break;
- }
-
- // Don't do any lookups twice for the same thread
- if (!promise->thread_is_available(act->get_tid()))
- continue;
-
- if (mo_graph->checkReachable(promise, write)) {
- if (mo_graph->checkPromise(write, promise)) {
- priv->hard_failed_promise = true;
- return;
- }
- }
- }
-}
-
-/**
- * Compute the set of writes that may break the current pending release
- * sequence. This information is extracted from previou release sequence
- * calculations.
- *
- * @param curr The current ModelAction. Must be a release sequence fixup
- * action.
- */
-void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
-{
- if (pending_rel_seqs.empty())
- return;
-
- struct release_seq *pending = pending_rel_seqs.back();
- for (unsigned int i = 0; i < pending->writes.size(); i++) {
- const ModelAction *write = pending->writes[i];
- curr->get_node()->add_relseq_break(write);
+bool valequals(uint64_t val1, uint64_t val2, int size) {
+ switch(size) {
+ case 1:
+ return ((uint8_t)val1) == ((uint8_t)val2);
+ case 2:
+ return ((uint16_t)val1) == ((uint16_t)val2);
+ case 4:
+ return ((uint32_t)val1) == ((uint32_t)val2);
+ case 8:
+ return val1==val2;
+ default:
+ ASSERT(0);
+ return false;
}
-
- /* NULL means don't break the sequence; just synchronize */
- curr->get_node()->add_relseq_break(NULL);
}
/**
* @param curr is the current ModelAction that we are exploring; it must be a
* 'read' operation.
*/
-void ModelExecution::build_may_read_from(ModelAction *curr)
+SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
if (curr->is_seqcst())
last_sc_write = get_last_seq_cst_write(curr);
+ SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
+
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Only consider 'write' actions */
if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
allow_read = false;
- else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
- allow_read = false;
+
+		/* Need to check whether we will have two RMWs reading from the same value */
+ if (curr->is_rmwr()) {
+ /* It is okay if we have a failing CAS */
+ if (!curr->is_rmwrcas() ||
+ valequals(curr->get_value(), act->get_value(), curr->getSize())) {
+ //Need to make sure we aren't the second RMW
+ CycleNode * node = mo_graph->getNode_noCreate(act);
+ if (node != NULL && node->getRMW() != NULL) {
+ //we are the second RMW
+ allow_read = false;
+ }
+ }
+ }
if (allow_read) {
/* Only add feasible reads */
- mo_graph->startChanges();
- r_modification_order(curr, act);
- if (!is_infeasible())
- curr->get_node()->add_read_from_past(act);
- mo_graph->rollbackChanges();
+ rf_set->push_back(act);
}
/* Include at most one act per-thread that "happens before" curr */
}
}
- /* Inherit existing, promised future values */
- for (i = 0; i < promises.size(); i++) {
- const Promise *promise = promises[i];
- const ModelAction *promise_read = promise->get_reader(0);
- if (promise_read->same_var(curr)) {
- /* Only add feasible future-values */
- mo_graph->startChanges();
- r_modification_order(curr, promise);
- if (!is_infeasible())
- curr->get_node()->add_read_from_promise(promise_read);
- mo_graph->rollbackChanges();
- }
- }
-
- /* We may find no valid may-read-from only if the execution is doomed */
- if (!curr->get_node()->read_from_size()) {
- priv->no_valid_reads = true;
- set_assert();
- }
-
if (DBG_ENABLED()) {
model_print("Reached read action:\n");
curr->print();
- model_print("Printing read_from_past\n");
- curr->get_node()->print_read_from_past();
model_print("End printing read_from_past\n");
}
-}
-
-bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
-{
- for ( ; write != NULL; write = write->get_reads_from()) {
- /* UNINIT actions don't have a Node, and they never sleep */
- if (write->is_uninitialized())
- return true;
- Node *prevnode = write->get_node()->get_parent();
-
- bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
- if (write->is_release() && thread_sleep)
- return true;
- if (!write->is_rmw())
- return false;
- }
- return true;
+ return rf_set;
}
/**
unsigned int hash = 0;
- for (it = list->begin(); it != list->end(); it++) {
+ for (it = list->begin();it != list->end();it++) {
const ModelAction *act = *it;
if (act->get_seq_number() > 0)
act->print();
mo_graph->dumpNodes(file);
ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
- for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
+ for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
ModelAction *act = *it;
if (act->is_read()) {
mo_graph->dot_print_node(file, act);
- if (act->get_reads_from())
- mo_graph->dot_print_edge(file,
- act->get_reads_from(),
- act,
- "label=\"rf\", color=red, weight=2");
- else
- mo_graph->dot_print_edge(file,
- act->get_reads_from_promise(),
- act,
- "label=\"rf\", color=red");
+ mo_graph->dot_print_edge(file,
+ act->get_reads_from(),
+ act,
+ "label=\"rf\", color=red, weight=2");
}
if (thread_array[act->get_tid()]) {
mo_graph->dot_print_edge(file,
- thread_array[id_to_int(act->get_tid())],
- act,
- "label=\"sb\", color=blue, weight=400");
+ thread_array[id_to_int(act->get_tid())],
+ act,
+ "label=\"sb\", color=blue, weight=400");
}
thread_array[act->get_tid()] = act;
model_print("Execution trace %d:", get_execution_number());
if (isfeasibleprefix()) {
- if (is_yieldblocked())
- model_print(" YIELD BLOCKED");
if (scheduler->all_threads_sleeping())
model_print(" SLEEP-SET REDUNDANT");
if (have_bug_reports())
print_list(&action_trace);
model_print("\n");
- if (!promises.empty()) {
- model_print("Pending promises:\n");
- for (unsigned int i = 0; i < promises.size(); i++) {
- model_print(" [P%u] ", i);
- promises[i]->print();
- }
- model_print("\n");
- }
}
/**
* @return A Thread reference
*/
Thread * ModelExecution::get_pthread(pthread_t pid) {
-	if (pid < pthread_counter + 1) return pthread_map[pid];
-	else return NULL;
-}
-
-/**
- * @brief Get a Promise's "promise number"
- *
- * A "promise number" is an index number that is unique to a promise, valid
- * only for a specific snapshot of an execution trace. Promises may come and go
- * as they are generated an resolved, so an index only retains meaning for the
- * current snapshot.
- *
- * @param promise The Promise to check
- * @return The promise index, if the promise still is valid; otherwise -1
- */
-int ModelExecution::get_promise_number(const Promise *promise) const
-{
-	for (unsigned int i = 0; i < promises.size(); i++)
-		if (promises[i] == promise)
-			return i;
-	/* Not found */
-	return -1;
+	/* pthread_t is an opaque (possibly pointer-sized) type, so it cannot be
+	 * compared or indexed directly; read its low 32 bits through a union to
+	 * recover the model checker's internal thread id.
+	 * NOTE(review): assumes pids handed out by this checker are small
+	 * sequential integers that fit in 32 bits -- TODO confirm against the
+	 * pthread_create interposition. */
+	union {
+		pthread_t p;
+		uint32_t v;
+	} x;
+	x.p = pid;
+	uint32_t thread_id = x.v;
+	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
+	else return NULL;
 }
/**
if (curr->is_rmwr())
return get_thread(curr);
if (curr->is_write()) {
-// std::memory_order order = curr->get_mo();
-// switch(order) {
-// case std::memory_order_relaxed:
-// return get_thread(curr);
-// case std::memory_order_release:
-// return get_thread(curr);
-// defalut:
-// return NULL;
-// }
- return NULL;
+ std::memory_order order = curr->get_mo();
+ switch(order) {
+ case std::memory_order_relaxed:
+ return get_thread(curr);
+ case std::memory_order_release:
+ return get_thread(curr);
+ default:
+ return NULL;
+ }
}
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
if (curr->get_type() == THREAD_CREATE)
- return curr->get_thread_operand();
+ return curr->get_thread_operand();
if (curr->get_type() == PTHREAD_CREATE) {
return curr->get_thread_operand();
}
return NULL;
}
-/** @return True if the execution has taken too many steps */
-bool ModelExecution::too_many_steps() const
-{
- return params->bound != 0 && priv->used_sequence_numbers > params->bound;
-}
-
/**
* Takes the next step in the execution, if possible.
* @param curr The current step to take
Thread *curr_thrd = get_thread(curr);
ASSERT(curr_thrd->get_state() == THREAD_READY);
- ASSERT(check_action_enabled(curr)); /* May have side effects? */
+ ASSERT(check_action_enabled(curr)); /* May have side effects? */
curr = check_current_action(curr);
ASSERT(curr);
return action_select_next_thread(curr);
}
-/**
- * Launch end-of-execution release sequence fixups only when
- * the execution is otherwise feasible AND there are:
- *
- * (1) pending release sequences
- * (2) pending assertions that could be invalidated by a change
- * in clock vectors (i.e., data races)
- * (3) no pending promises
- */
-void ModelExecution::fixup_release_sequences()
-{
- while (!pending_rel_seqs.empty() &&
- is_feasible_prefix_ignore_relseq() &&
- haveUnrealizedRaces()) {
- model_print("*** WARNING: release sequence fixup action "
- "(%zu pending release seuqence(s)) ***\n",
- pending_rel_seqs.size());
- ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
- std::memory_order_seq_cst, NULL, VALUE_NONE,
- model_thread);
- take_step(fixup);
- };
+/** @brief Return this execution's Fuzzer (the randomized exploration policy). */
+Fuzzer * ModelExecution::getFuzzer() {
+	return fuzzer;
+}
#include "mymemory.h"
#include "hashtable.h"
-#include "workqueue.h"
#include "config.h"
#include "modeltypes.h"
#include "stl-model.h"
#include "params.h"
-
+#include "mypthread.h"
#include "mutex.h"
#include <condition_variable>
-
-/* Forward declaration */
-class Node;
-class NodeStack;
-class CycleGraph;
-class Promise;
-class Scheduler;
-class Thread;
-class ClockVector;
-struct model_snapshot_members;
-class ModelChecker;
-struct bug_message;
+#include "classlist.h"
/** @brief Shorthand for a list of release sequence heads */
typedef ModelVector<const ModelAction *> rel_heads_list_t;
class ModelExecution {
public:
ModelExecution(ModelChecker *m,
- const struct model_params *params,
- Scheduler *scheduler,
- NodeStack *node_stack);
+ Scheduler *scheduler,
+ NodeStack *node_stack);
~ModelExecution();
- const struct model_params * get_params() const { return params; }
+ struct model_params * get_params() const { return params; }
+ void setParams(struct model_params * _params) {params = _params;}
Thread * take_step(ModelAction *curr);
- void fixup_release_sequences();
void print_summary() const;
#if SUPPORT_MOD_ORDER_DUMP
Thread * get_thread(thread_id_t tid) const;
Thread * get_thread(const ModelAction *act) const;
- pthread_t get_pthread_counter() { return pthread_counter; }
+ uint32_t get_pthread_counter() { return pthread_counter; }
void incr_pthread_counter() { pthread_counter++; }
Thread * get_pthread(pthread_t pid);
- int get_promise_number(const Promise *promise) const;
-
bool is_enabled(Thread *t) const;
bool is_enabled(thread_id_t tid) const;
ClockVector * get_cv(thread_id_t tid) const;
ModelAction * get_parent_action(thread_id_t tid) const;
- void check_promises_thread_disabled();
bool isfeasibleprefix() const;
action_list_t * get_actions_on_obj(void * obj, thread_id_t tid) const;
bool is_complete_execution() const;
void print_infeasibility(const char *prefix) const;
- bool is_feasible_prefix_ignore_relseq() const;
bool is_infeasible() const;
bool is_deadlocked() const;
- bool is_yieldblocked() const;
- bool too_many_steps() const;
-
- ModelAction * get_next_backtrack();
action_list_t * get_action_trace() { return &action_trace; }
-
+ Fuzzer * getFuzzer();
CycleGraph * const get_mo_graph() { return mo_graph; }
-
- HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
- HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> cond_map;
+ HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
+ HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
SNAPSHOTALLOC
private:
int get_execution_number() const;
- pthread_t pthread_counter;
ModelChecker *model;
- const model_params * const params;
+ struct model_params * params;
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
- bool sleep_can_read_from(ModelAction *curr, const ModelAction *write);
- bool thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const;
bool mo_may_allow(const ModelAction *writer, const ModelAction *reader);
- bool promises_may_allow(const ModelAction *writer, const ModelAction *reader) const;
void set_bad_synchronization();
- void set_bad_sc_read();
- bool promises_expired() const;
bool should_wake_up(const ModelAction *curr, const Thread *thread) const;
void wake_up_sleeping_actions(ModelAction *curr);
modelclock_t get_next_seq_num();
bool next_execution();
ModelAction * check_current_action(ModelAction *curr);
bool initialize_curr_action(ModelAction **curr);
- bool process_read(ModelAction *curr);
- bool process_write(ModelAction *curr, work_queue_t *work);
+ void process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set);
+ void process_write(ModelAction *curr);
bool process_fence(ModelAction *curr);
bool process_mutex(ModelAction *curr);
bool process_thread_action(ModelAction *curr);
- void process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue);
bool read_from(ModelAction *act, const ModelAction *rf);
bool synchronize(const ModelAction *first, ModelAction *second);
- template <typename T>
- bool check_recency(ModelAction *curr, const T *rf) const;
-
- template <typename T, typename U>
- bool should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const;
-
- ModelAction * get_last_fence_conflict(ModelAction *act) const;
- ModelAction * get_last_conflict(ModelAction *act) const;
- void set_backtracking(ModelAction *act);
- bool set_latest_backtrack(ModelAction *act);
- Promise * pop_promise_to_resolve(const ModelAction *curr);
- bool resolve_promise(ModelAction *curr, Promise *promise,
- work_queue_t *work);
- void compute_promises(ModelAction *curr);
- void compute_relseq_breakwrites(ModelAction *curr);
-
- void check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv);
- void mo_check_promises(const ModelAction *act, bool is_read_check);
- void thread_blocking_check_promises(Thread *blocker, Thread *waiting);
-
- void check_curr_backtracking(ModelAction *curr);
void add_action_to_lists(ModelAction *act);
ModelAction * get_last_fence_release(thread_id_t tid) const;
ModelAction * get_last_seq_cst_write(ModelAction *curr) const;
ModelAction * get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const;
ModelAction * get_last_unlock(ModelAction *curr) const;
- void build_may_read_from(ModelAction *curr);
+ SnapVector<ModelAction *> * build_may_read_from(ModelAction *curr);
ModelAction * process_rmw(ModelAction *curr);
- template <typename rf_type>
- bool r_modification_order(ModelAction *curr, const rf_type *rf);
-
- bool w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv);
+ bool r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> *priorset);
+ void w_modification_order(ModelAction *curr);
void get_release_seq_heads(ModelAction *acquire, ModelAction *read, rel_heads_list_t *release_heads);
- bool release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads, struct release_seq *pending) const;
- void propagate_clockvector(ModelAction *acquire, work_queue_t *work);
- bool resolve_release_sequences(void *location, work_queue_t *work_queue);
- void add_future_value(const ModelAction *writer, ModelAction *reader);
- bool check_coherence_promise(const ModelAction *write, const ModelAction *read);
+ bool release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const;
ModelAction * get_uninitialized_action(const ModelAction *curr) const;
action_list_t action_trace;
SnapVector<Thread *> thread_map;
SnapVector<Thread *> pthread_map;
+ uint32_t pthread_counter;
/** Per-object list of actions. Maps an object (i.e., memory location)
* to a trace of all actions performed on the object. */
HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> obj_thrd_map;
-// HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
-
- /**
- * @brief List of currently-pending promises
- *
- * Promises are sorted by the execution order of the read(s) which
- * created them
- */
- SnapVector<Promise *> promises;
- SnapVector<struct PendingFutureValue> futurevalues;
+ HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
+ HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> cond_map;
/**
* List of pending release sequences. Release sequences might be
* are established. Each entry in the list may only be partially
* filled, depending on its pending status.
*/
- SnapVector<struct release_seq *> pending_rel_seqs;
SnapVector<ModelAction *> thrd_last_action;
SnapVector<ModelAction *> thrd_last_fence_release;
*/
CycleGraph * const mo_graph;
+ Fuzzer * fuzzer;
+
Thread * action_select_next_thread(const ModelAction *curr) const;
};
-#endif /* __EXECUTION_H__ */
+#endif /* __EXECUTION_H__ */
// Copyright (C) 2015-2019 Free Software Foundation, Inc.
//
-// This is a reimplementation of libstdc++-v3/src/c++11/futex.cc.
+// This is a reimplementation of libstdc++-v3/src/c++11/futex.cc.
#include <bits/atomic_futex.h>
#ifdef _GLIBCXX_HAS_GTHREADS
namespace std _GLIBCXX_VISIBILITY(default)
{
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
+ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
__atomic_futex_unsigned_base::_M_futex_wait_until(unsigned *__addr,
- unsigned __val,
- bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns)
+ unsigned __val,
+ bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns)
{
// do nothing if the two values are not equal
if ( *__addr != __val ) {
return true;
}
- // if a timeout is specified, return immedialy. Letting the scheduler decide how long this thread will wait.
+	// If a timeout is specified, return immediately and let the scheduler decide how long this thread will wait.
if (__has_timeout) {
return true;
}
cdsc::condition_variable *v = new cdsc::condition_variable();
cdsc::mutex *m = new cdsc::mutex();
- execution->cond_map.put( (pthread_cond_t *) __addr, v);
- execution->mutex_map.put( (pthread_mutex_t *) __addr, m);
+ execution->getCondMap()->put( (pthread_cond_t *) __addr, v);
+ execution->getMutexMap()->put( (pthread_mutex_t *) __addr, m);
v->wait(*m);
return true;
{
// INT_MAX wakes all the waiters at the address __addr
ModelExecution *execution = model->get_execution();
- cdsc::condition_variable *v = execution->cond_map.get( (pthread_cond_t *) __addr);
+ cdsc::condition_variable *v = execution->getCondMap()->get( (pthread_cond_t *) __addr);
v->notify_all();
}
-_GLIBCXX_END_NAMESPACE_VERSION
+ _GLIBCXX_END_NAMESPACE_VERSION
}
-#endif // defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
-#endif // _GLIBCXX_HAS_GTHREADS
+#endif // defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
+#endif // _GLIBCXX_HAS_GTHREADS
--- /dev/null
+#include "fuzzer.h"
+#include <stdlib.h>
+#include "threads-model.h"
+#include "model.h"
+
+/**
+ * @brief Randomly choose which write a pending read will read from.
+ *
+ * @param read The read action being resolved (unused by this random policy)
+ * @param rf_set Candidate writes the read may read from; assumed non-empty,
+ *        since random() % 0 would divide by zero -- TODO confirm callers
+ * @return An index into rf_set identifying the chosen write
+ */
+int Fuzzer::selectWrite(ModelAction *read, SnapVector<ModelAction *> * rf_set) {
+	int random_index = random() % rf_set->size();
+	return random_index;
+}
+
+/**
+ * @brief Randomly pick the next thread to schedule.
+ *
+ * @param n Current Node (unused by this random policy)
+ * @param threadlist Array of runnable thread numbers
+ * @param numthreads Number of valid entries in threadlist; assumed > 0,
+ *        since random() % 0 would divide by zero -- TODO confirm callers
+ * @return The Thread object corresponding to the randomly chosen id
+ */
+Thread * Fuzzer::selectThread(Node *n, int * threadlist, int numthreads) {
+	int random_index = random() % numthreads;
+	int thread = threadlist[random_index];
+	thread_id_t curr_tid = int_to_id(thread);
+	return model->get_thread(curr_tid);
+}
+
+/**
+ * @brief Randomly choose one waiter to wake for a notify.
+ *
+ * The chosen action is removed from the waiters list, so a later notify
+ * cannot select the same waiter again.
+ *
+ * @param waiters List of actions blocked on the condition variable; assumed
+ *        non-empty, since random() % 0 would divide by zero -- TODO confirm
+ * @return The Thread that issued the chosen waiting action
+ */
+Thread * Fuzzer::selectNotify(action_list_t * waiters) {
+	int numwaiters = waiters->size();
+	int random_index = random() % numwaiters;
+	action_list_t::iterator it = waiters->begin();
+	advance(it, random_index);
+	Thread *thread = model->get_thread(*it);
+	waiters->erase(it);
+	return thread;
+}
--- /dev/null
+#ifndef FUZZER_H
+#define FUZZER_H
+#include "classlist.h"
+#include "mymemory.h"
+#include "stl-model.h"
+
+/**
+ * @brief Randomized exploration policy for the model checker.
+ *
+ * Centralizes the random choices made while exploring an execution: which
+ * write a read observes (selectWrite), which runnable thread executes next
+ * (selectThread), and which waiter a notify wakes (selectNotify).
+ */
+class Fuzzer {
+public:
+	Fuzzer() {}
+	int selectWrite(ModelAction *read, SnapVector<ModelAction *>* rf_set);
+	Thread * selectThread(Node *n, int * threadlist, int numthreads);
+	Thread * selectNotify(action_list_t * waiters);
+	MEMALLOC
+private:
+};
+#endif
*/
template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free>
class HashTable {
- public:
+public:
/**
* @brief Hash table constructor
* @param initialcapacity Sets the initial capacity of the hash table.
capacitymask = initialcapacity - 1;
threshold = (unsigned int)(initialcapacity * loadfactor);
- size = 0; // Initial number of elements in the hash
+ size = 0; // Initial number of elements in the hash
}
/** @brief Hash table destructor */
exit(EXIT_FAILURE);
}
- table = newtable; // Update the global hashtable upon resize()
+ table = newtable; // Update the global hashtable upon resize()
capacity = newsize;
capacitymask = newsize - 1;
struct hashlistnode<_Key, _Val> *bin = &oldtable[0];
struct hashlistnode<_Key, _Val> *lastbin = &oldtable[oldcapacity];
- for (; bin < lastbin; bin++) {
+ for (;bin < lastbin;bin++) {
_Key key = bin->key;
struct hashlistnode<_Key, _Val> *search;
search->val = bin->val;
}
- _free(oldtable); // Free the memory of the old hash table
+ _free(oldtable); // Free the memory of the old hash table
}
- private:
+private:
struct hashlistnode<_Key, _Val> *table;
unsigned int capacity;
unsigned int size;
double loadfactor;
};
-#endif /* __HASHTABLE_H__ */
+#endif /* __HASHTABLE_H__ */
{ return atomic_flag_test_and_set_explicit( __a__, memory_order_seq_cst ); }
void atomic_flag_clear_explicit
-( volatile atomic_flag* __a__, memory_order __x__ )
+ ( volatile atomic_flag* __a__, memory_order __x__ )
{
volatile bool * __p__ = &((__a__)->__f__);
model->switch_to_master(new ModelAction(ATOMIC_WRITE, __x__, (void *) __p__, false));
void atomic_flag_clear( volatile atomic_flag* __a__ )
{ atomic_flag_clear_explicit( __a__, memory_order_seq_cst ); }
-void __atomic_flag_wait__( volatile atomic_flag* __a__ ) {
+void __atomic_flag_wait__( volatile atomic_flag* __a__ ) {
while ( atomic_flag_test_and_set( __a__ ) )
- ;
+ ;
}
void __atomic_flag_wait_explicit__( volatile atomic_flag* __a__,
- memory_order __x__ ) {
+ memory_order __x__ ) {
while ( atomic_flag_test_and_set_explicit( __a__, __x__ ))
;
}
void model_write_action(void * obj, memory_order ord, uint64_t val);
void model_init_action(void * obj, uint64_t val);
uint64_t model_rmwr_action(void *obj, memory_order ord);
+uint64_t model_rmwrcas_action(void *obj, memory_order ord, uint64_t oval, int size);
void model_rmw_action(void *obj, memory_order ord, uint64_t val);
void model_rmwc_action(void *obj, memory_order ord);
void model_fence_action(memory_order ord);
__typeof__(__e__) __q__ = (__e__); \
__typeof__(__m__) __v__ = (__m__); \
bool __r__; \
- __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwrcas_action((void *)__p__, __x__, (uint64_t) * __q__, sizeof((__a__)->__f__)); \
if (__t__ == * __q__ ) {; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
#include <stddef.h>
#endif
-
typedef enum memory_order {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;
-
-
+
#ifdef __cplusplus
}
#endif
--- /dev/null
+/**
+ * @file pthread.h
+ * @brief C11 pthread.h interface header
+ */
+#ifndef PTHREAD_H
+#define PTHREAD_H
+
+#include <threads.h>
+#include <sched.h>
+//#include <bits/pthreadtypes.h>
+#include <pthread.h>
+
+/** @brief Signature of a pthread entry routine, as passed to pthread_create. */
+typedef void *(*pthread_start_t)(void *);
+
+/** @brief Bundles a thread's start routine with its argument so they can be
+ *  handed to the model-checked thread machinery as a single pointer. */
+struct pthread_params {
+	pthread_start_t func;
+	void *arg;
+};
+
+extern "C" {
+int pthread_create(pthread_t *, const pthread_attr_t *,
+ void *(*start_routine) (void *), void * arg);
+void pthread_exit(void *);
+int pthread_join(pthread_t, void **);
+
+pthread_t pthread_self(void);
+
+int pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
+int pthread_mutex_lock(pthread_mutex_t *);
+int pthread_mutex_trylock(pthread_mutex_t *);
+int pthread_mutex_unlock(pthread_mutex_t *);
+int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
+ const struct timespec *__restrict __abstime);
+
+int pthread_cond_init(pthread_cond_t *p_cond, const pthread_condattr_t *attr);
+int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex);
+int pthread_cond_timedwait(pthread_cond_t *p_cond,
+ pthread_mutex_t *p_mutex, const struct timespec *abstime);
+int pthread_cond_signal(pthread_cond_t *);
+
+
+int user_main(int, char**);
+
+// --- not implemented yet ---
+
+int pthread_attr_destroy(pthread_attr_t *);
+int pthread_attr_getdetachstate(const pthread_attr_t *, int *);
+int pthread_attr_getguardsize(const pthread_attr_t *, size_t *);
+int pthread_attr_getinheritsched(const pthread_attr_t *, int *);
+int pthread_attr_getschedparam(const pthread_attr_t *,
+ struct sched_param *);
+int pthread_attr_getschedpolicy(const pthread_attr_t *, int *);
+int pthread_attr_getscope(const pthread_attr_t *, int *);
+int pthread_attr_getstackaddr(const pthread_attr_t *, void **);
+int pthread_attr_getstacksize(const pthread_attr_t *, size_t *);
+int pthread_attr_init(pthread_attr_t *);
+int pthread_attr_setdetachstate(pthread_attr_t *, int);
+int pthread_attr_setguardsize(pthread_attr_t *, size_t);
+int pthread_attr_setinheritsched(pthread_attr_t *, int);
+int pthread_attr_setschedparam(pthread_attr_t *,
+ const struct sched_param *);
+int pthread_attr_setschedpolicy(pthread_attr_t *, int);
+int pthread_attr_setscope(pthread_attr_t *, int);
+int pthread_attr_setstackaddr(pthread_attr_t *, void *);
+int pthread_attr_setstacksize(pthread_attr_t *, size_t);
+int pthread_cancel(pthread_t);
+int pthread_cond_broadcast(pthread_cond_t *);
+int pthread_cond_destroy(pthread_cond_t *);
+int pthread_condattr_destroy(pthread_condattr_t *);
+int pthread_condattr_getpshared(const pthread_condattr_t *, int *);
+int pthread_condattr_init(pthread_condattr_t *);
+int pthread_condattr_setpshared(pthread_condattr_t *, int);
+
+int pthread_detach(pthread_t);
+int pthread_equal(pthread_t, pthread_t);
+int pthread_getconcurrency(void);
+int pthread_getschedparam(pthread_t, int *, struct sched_param *);
+void *pthread_getspecific(pthread_key_t);
+int pthread_key_create(pthread_key_t *, void (*)(void *));
+int pthread_key_delete(pthread_key_t);
+int pthread_mutex_destroy(pthread_mutex_t *);
+int pthread_mutex_getprioceiling(const pthread_mutex_t *, int *);
+int pthread_mutex_setprioceiling(pthread_mutex_t *, int, int *);
+int pthread_mutexattr_destroy(pthread_mutexattr_t *);
+int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *,
+ int *);
+int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *, int *);
+int pthread_mutexattr_getpshared(const pthread_mutexattr_t *, int *);
+int pthread_mutexattr_gettype(const pthread_mutexattr_t *, int *);
+int pthread_mutexattr_init(pthread_mutexattr_t *);
+int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *, int);
+int pthread_mutexattr_setprotocol(pthread_mutexattr_t *, int);
+int pthread_mutexattr_setpshared(pthread_mutexattr_t *, int);
+int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
+int pthread_once(pthread_once_t *, void (*)(void));
+int pthread_rwlock_destroy(pthread_rwlock_t *);
+int pthread_rwlock_init(pthread_rwlock_t *,
+ const pthread_rwlockattr_t *);
+int pthread_rwlock_rdlock(pthread_rwlock_t *);
+int pthread_rwlock_tryrdlock(pthread_rwlock_t *);
+int pthread_rwlock_trywrlock(pthread_rwlock_t *);
+int pthread_rwlock_unlock(pthread_rwlock_t *);
+int pthread_rwlock_wrlock(pthread_rwlock_t *);
+int pthread_rwlockattr_destroy(pthread_rwlockattr_t *);
+int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *,
+ int *);
+int pthread_rwlockattr_init(pthread_rwlockattr_t *);
+int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *, int);
+int pthread_setcancelstate(int, int *);
+int pthread_setcanceltype(int, int *);
+int pthread_setconcurrency(int);
+int pthread_setschedparam(pthread_t, int ,
+ const struct sched_param *);
+int pthread_setspecific(pthread_key_t, const void *);
+void pthread_testcancel(void);
+
+}
+
+void check();
+#endif
+++ /dev/null
-/**
- * @file pthread.h
- * @brief C11 pthread.h interface header
- */
-#ifndef PTHREAD_H
-#define PTHREAD_H 1
-
-#include <threads.h>
-#include <sched.h>
-#include <bits/pthreadtypes.h>
-#include <pthread.h>
-
-/* Mutex types. */
-enum
-{
- PTHREAD_MUTEX_TIMED_NP,
- PTHREAD_MUTEX_RECURSIVE_NP,
- PTHREAD_MUTEX_ERRORCHECK_NP,
- PTHREAD_MUTEX_ADAPTIVE_NP
-#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
- ,
- PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
- PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
- PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,
- PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
-#endif
-#ifdef __USE_GNU
- /* For compatibility. */
- , PTHREAD_MUTEX_FAST_NP = PTHREAD_MUTEX_TIMED_NP
-#endif
-};
-
-typedef void *(*pthread_start_t)(void *);
-
-struct pthread_params {
- pthread_start_t func;
- void *arg;
-};
-
-extern "C" {
-int pthread_create(pthread_t *, const pthread_attr_t *,
- void *(*start_routine) (void *), void * arg);
-void pthread_exit(void *);
-int pthread_join(pthread_t, void **);
-
-pthread_t pthread_self(void);
-
-int pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
-int pthread_mutex_lock(pthread_mutex_t *);
-int pthread_mutex_trylock(pthread_mutex_t *);
-int pthread_mutex_unlock(pthread_mutex_t *);
-int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
- const struct timespec *__restrict __abstime);
-
-int pthread_cond_init(pthread_cond_t *p_cond, const pthread_condattr_t *attr);
-int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex);
-int pthread_cond_timedwait(pthread_cond_t *p_cond,
- pthread_mutex_t *p_mutex, const struct timespec *abstime);
-int pthread_cond_signal(pthread_cond_t *);
-
-void pthread_cleanup_push(void (*routine)(void*), void *arg );
-
-int user_main(int, char**);
-
-// --- not implemented yet ---
-
-int pthread_attr_destroy(pthread_attr_t *);
-int pthread_attr_getdetachstate(const pthread_attr_t *, int *);
-int pthread_attr_getguardsize(const pthread_attr_t *, size_t *);
-int pthread_attr_getinheritsched(const pthread_attr_t *, int *);
-int pthread_attr_getschedparam(const pthread_attr_t *,
- struct sched_param *);
-int pthread_attr_getschedpolicy(const pthread_attr_t *, int *);
-int pthread_attr_getscope(const pthread_attr_t *, int *);
-int pthread_attr_getstackaddr(const pthread_attr_t *, void **);
-int pthread_attr_getstacksize(const pthread_attr_t *, size_t *);
-int pthread_attr_init(pthread_attr_t *);
-int pthread_attr_setdetachstate(pthread_attr_t *, int);
-int pthread_attr_setguardsize(pthread_attr_t *, size_t);
-int pthread_attr_setinheritsched(pthread_attr_t *, int);
-int pthread_attr_setschedparam(pthread_attr_t *,
- const struct sched_param *);
-int pthread_attr_setschedpolicy(pthread_attr_t *, int);
-int pthread_attr_setscope(pthread_attr_t *, int);
-int pthread_attr_setstackaddr(pthread_attr_t *, void *);
-int pthread_attr_setstacksize(pthread_attr_t *, size_t);
-int pthread_cancel(pthread_t);
-int pthread_cond_broadcast(pthread_cond_t *);
-int pthread_cond_destroy(pthread_cond_t *);
-int pthread_condattr_destroy(pthread_condattr_t *);
-int pthread_condattr_getpshared(const pthread_condattr_t *, int *);
-int pthread_condattr_init(pthread_condattr_t *);
-int pthread_condattr_setpshared(pthread_condattr_t *, int);
-
-int pthread_detach(pthread_t);
-int pthread_equal(pthread_t, pthread_t);
-int pthread_getconcurrency(void);
-int pthread_getschedparam(pthread_t, int *, struct sched_param *);
-void *pthread_getspecific(pthread_key_t);
-int pthread_key_create(pthread_key_t *, void (*)(void *));
-int pthread_key_delete(pthread_key_t);
-int pthread_mutex_destroy(pthread_mutex_t *);
-int pthread_mutex_getprioceiling(const pthread_mutex_t *, int *);
-int pthread_mutex_setprioceiling(pthread_mutex_t *, int, int *);
-int pthread_mutexattr_destroy(pthread_mutexattr_t *);
-int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *,
- int *);
-int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *, int *);
-int pthread_mutexattr_getpshared(const pthread_mutexattr_t *, int *);
-int pthread_mutexattr_gettype(const pthread_mutexattr_t *, int *);
-int pthread_mutexattr_init(pthread_mutexattr_t *);
-int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *, int);
-int pthread_mutexattr_setprotocol(pthread_mutexattr_t *, int);
-int pthread_mutexattr_setpshared(pthread_mutexattr_t *, int);
-int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
-int pthread_once(pthread_once_t *, void (*)(void));
-int pthread_rwlock_destroy(pthread_rwlock_t *);
-int pthread_rwlock_init(pthread_rwlock_t *,
- const pthread_rwlockattr_t *);
-int pthread_rwlock_rdlock(pthread_rwlock_t *);
-int pthread_rwlock_tryrdlock(pthread_rwlock_t *);
-int pthread_rwlock_trywrlock(pthread_rwlock_t *);
-int pthread_rwlock_unlock(pthread_rwlock_t *);
-int pthread_rwlock_wrlock(pthread_rwlock_t *);
-int pthread_rwlockattr_destroy(pthread_rwlockattr_t *);
-int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *,
- int *);
-int pthread_rwlockattr_init(pthread_rwlockattr_t *);
-int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *, int);
-int pthread_setcancelstate(int, int *);
-int pthread_setcanceltype(int, int *);
-int pthread_setconcurrency(int);
-int pthread_setschedparam(pthread_t, int ,
- const struct sched_param *);
-int pthread_setspecific(pthread_key_t, const void *);
-void pthread_testcancel(void);
-
-}
-
-void check();
-#endif
// helper functions used by CdsPass
// The CdsPass implementation does not replace normal load/stores with cds load/stores,
// but inserts cds load/stores to check dataraces. Thus, the cds load/stores do not
-// return anything.
+// return anything.
void cds_store8(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu8 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu8 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
}
void cds_store16(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu16 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu16 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
void cds_store32(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu32 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu32 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
void cds_store64(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu64 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu64 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
-#include <threads.h>
+#include "threads.h"
#include "common.h"
#include "threads-model.h"
#include "action.h"
+
/* global "model" object */
#include "model.h"
#include "model.h"
#include "params.h"
#include "snapshot-interface.h"
+#include "plugins.h"
static void param_defaults(struct model_params *params)
{
- params->maxreads = 0;
- params->maxfuturedelay = 6;
- params->fairwindow = 0;
- params->yieldon = false;
- params->yieldblock = false;
- params->enabledcount = 1;
- params->bound = 0;
- params->maxfuturevalues = 0;
- params->expireslop = 4;
params->verbose = !!DBG_ENABLED();
params->uninitvalue = 0;
- params->maxexecutions = 0;
+ params->maxexecutions = 10;
}
static void print_usage(const char *program_name, struct model_params *params)
{
+ ModelVector<TraceAnalysis *> * registeredanalysis=getRegisteredTraceAnalysis();
/* Reset defaults before printing */
param_defaults(params);
model_print(
-"Copyright (c) 2013 Regents of the University of California. All rights reserved.\n"
-"Distributed under the GPLv2\n"
-"Written by Brian Norris and Brian Demsky\n"
-"\n"
-"Usage: %s [MODEL-CHECKER OPTIONS] -- [PROGRAM ARGS]\n"
-"\n"
-"MODEL-CHECKER OPTIONS can be any of the model-checker options listed below. Arguments\n"
-"provided after the `--' (the PROGRAM ARGS) are passed to the user program.\n"
-"\n"
-"Model-checker options:\n"
-"-h, --help Display this help message and exit\n"
-"-m, --liveness=NUM Maximum times a thread can read from the same write\n"
-" while other writes exist.\n"
-" Default: %d\n"
-"-M, --maxfv=NUM Maximum number of future values that can be sent to\n"
-" the same read.\n"
-" Default: %d\n"
-"-s, --maxfvdelay=NUM Maximum actions that the model checker will wait for\n"
-" a write from the future past the expected number\n"
-" of actions.\n"
-" Default: %d\n"
-"-S, --fvslop=NUM Future value expiration sloppiness.\n"
-" Default: %u\n"
-"-y, --yield Enable CHESS-like yield-based fairness support\n"
-" (requires thrd_yield() in test program).\n"
-" Default: %s\n"
-"-Y, --yieldblock Prohibit an execution from running a yield.\n"
-" Default: %s\n"
-"-f, --fairness=WINDOW Specify a fairness window in which actions that are\n"
-" enabled sufficiently many times should receive\n"
-" priority for execution (not recommended).\n"
-" Default: %d\n"
-"-e, --enabled=COUNT Enabled count.\n"
-" Default: %d\n"
-"-b, --bound=MAX Upper length bound.\n"
-" Default: %d\n"
-"-v[NUM], --verbose[=NUM] Print verbose execution information. NUM is optional:\n"
-" 0 is quiet; 1 shows valid executions; 2 is noisy;\n"
-" 3 is noisier.\n"
-" Default: %d\n"
-"-u, --uninitialized=VALUE Return VALUE any load which may read from an\n"
-" uninitialized atomic.\n"
-" Default: %u\n"
-"-t, --analysis=NAME Use Analysis Plugin.\n"
-"-o, --options=NAME Option for previous analysis plugin. \n"
-"-x, --maxexec=NUM Maximum number of executions.\n"
-" Default: %u\n"
-" -o help for a list of options\n"
-" -- Program arguments follow.\n\n",
+ "Copyright (c) 2013 Regents of the University of California. All rights reserved.\n"
+ "Distributed under the GPLv2\n"
+ "Written by Brian Norris and Brian Demsky\n"
+ "\n"
+ "Usage: %s [MODEL-CHECKER OPTIONS] -- [PROGRAM ARGS]\n"
+ "\n"
+ "MODEL-CHECKER OPTIONS can be any of the model-checker options listed below. Arguments\n"
+ "provided after the `--' (the PROGRAM ARGS) are passed to the user program.\n"
+ "\n"
+ "Model-checker options:\n"
+ "-h, --help Display this help message and exit\n"
+ "-v[NUM], --verbose[=NUM] Print verbose execution information. NUM is optional:\n"
+ " 0 is quiet; 1 shows valid executions; 2 is noisy;\n"
+ " 3 is noisier.\n"
+ " Default: %d\n"
+ "-u, --uninitialized=VALUE Return VALUE any load which may read from an\n"
+ " uninitialized atomic.\n"
+ " Default: %u\n"
+ "-t, --analysis=NAME Use Analysis Plugin.\n"
+ "-o, --options=NAME Option for previous analysis plugin. \n"
+ "-x, --maxexec=NUM Maximum number of executions.\n"
+ " Default: %u\n"
+ " -o help for a list of options\n"
+ " -- Program arguments follow.\n\n",
program_name,
- params->maxreads,
- params->maxfuturevalues,
- params->maxfuturedelay,
- params->expireslop,
- params->yieldon ? "enabled" : "disabled",
- params->yieldblock ? "enabled" : "disabled",
- params->fairwindow,
- params->enabledcount,
- params->bound,
params->verbose,
- params->uninitvalue,
+ params->uninitvalue,
params->maxexecutions);
-
+ model_print("Analysis plugins:\n");
+ for(unsigned int i=0;i<registeredanalysis->size();i++) {
+ TraceAnalysis * analysis=(*registeredanalysis)[i];
+ model_print("%s\n", analysis->name());
+ }
exit(EXIT_SUCCESS);
}
+bool install_plugin(char * name) {
+ ModelVector<TraceAnalysis *> * registeredanalysis=getRegisteredTraceAnalysis();
+ ModelVector<TraceAnalysis *> * installedanalysis=getInstalledTraceAnalysis();
+
+ for(unsigned int i=0;i<registeredanalysis->size();i++) {
+ TraceAnalysis * analysis=(*registeredanalysis)[i];
+ if (strcmp(name, analysis->name())==0) {
+ installedanalysis->push_back(analysis);
+ return false;
+ }
+ }
+ model_print("Analysis %s Not Found\n", name);
+ return true;
+}
+
static void parse_options(struct model_params *params, int argc, char **argv)
{
- const char *shortopts = "hyYt:o:m:M:s:S:f:e:b:u:x:v::";
+ const char *shortopts = "ht:o:u:x:v::";
const struct option longopts[] = {
{"help", no_argument, NULL, 'h'},
- {"liveness", required_argument, NULL, 'm'},
- {"maxfv", required_argument, NULL, 'M'},
- {"maxfvdelay", required_argument, NULL, 's'},
- {"fvslop", required_argument, NULL, 'S'},
- {"fairness", required_argument, NULL, 'f'},
- {"yield", no_argument, NULL, 'y'},
- {"yieldblock", no_argument, NULL, 'Y'},
- {"enabled", required_argument, NULL, 'e'},
- {"bound", required_argument, NULL, 'b'},
{"verbose", optional_argument, NULL, 'v'},
{"uninitialized", required_argument, NULL, 'u'},
{"analysis", required_argument, NULL, 't'},
{"options", required_argument, NULL, 'o'},
{"maxexecutions", required_argument, NULL, 'x'},
- {0, 0, 0, 0} /* Terminator */
+ {0, 0, 0, 0} /* Terminator */
};
int opt, longindex;
bool error = false;
case 'x':
params->maxexecutions = atoi(optarg);
break;
- case 's':
- params->maxfuturedelay = atoi(optarg);
- break;
- case 'S':
- params->expireslop = atoi(optarg);
- break;
- case 'f':
- params->fairwindow = atoi(optarg);
- break;
- case 'e':
- params->enabledcount = atoi(optarg);
- break;
- case 'b':
- params->bound = atoi(optarg);
- break;
- case 'm':
- params->maxreads = atoi(optarg);
- break;
- case 'M':
- params->maxfuturevalues = atoi(optarg);
- break;
case 'v':
params->verbose = optarg ? atoi(optarg) : 1;
break;
case 'u':
params->uninitvalue = atoi(optarg);
break;
- case 'y':
- params->yieldon = true;
- break;
-/** case 't':
+ case 't':
if (install_plugin(optarg))
error = true;
break;
case 'o':
- {
- ModelVector<TraceAnalysis *> * analyses = getInstalledTraceAnalysis();
- if ( analyses->size() == 0 || (*analyses)[analyses->size()-1]->option(optarg))
- error = true;
- }
- break;
-*/
- case 'Y':
- params->yieldblock = true;
- break;
- default: /* '?' */
+ {
+ ModelVector<TraceAnalysis *> * analyses = getInstalledTraceAnalysis();
+ if ( analyses->size() == 0 || (*analyses)[analyses->size()-1]->option(optarg))
+ error = true;
+ }
+ break;
+ default: /* '?' */
error = true;
break;
}
int main_argc;
char **main_argv;
+static void install_trace_analyses(ModelExecution *execution)
+{
+ ModelVector<TraceAnalysis *> * installedanalysis=getInstalledTraceAnalysis();
+ for(unsigned int i=0;i<installedanalysis->size();i++) {
+ TraceAnalysis * ta=(*installedanalysis)[i];
+ ta->setExecution(execution);
+ model->add_trace_analysis(ta);
+ /** Call the installation event for each installed plugin */
+ ta->actionAtInstallation();
+ }
+}
+
/** The model_main function contains the main model checking loop. */
static void model_main()
{
struct model_params params;
param_defaults(¶ms);
+ register_plugins();
parse_options(¶ms, main_argc, main_argv);
snapshot_stack_init();
- model = new ModelChecker(params); // L: Model thread is created
-// install_trace_analyses(model->get_execution()); L: disable plugin
+ if (!model)
+ model = new ModelChecker();
+ model->setParams(params);
+ install_trace_analyses(model->get_execution());
snapshot_record(0);
model->run();
ModelChecker *model;
/** @brief Constructor */
-ModelChecker::ModelChecker(struct model_params params) :
+ModelChecker::ModelChecker() :
/* Initialize default scheduler */
- params(params),
+ params(),
restart_flag(false),
- exit_flag(false),
scheduler(new Scheduler()),
node_stack(new NodeStack()),
- execution(new ModelExecution(this, &this->params, scheduler, node_stack)), // L: Model thread is created inside
+ execution(new ModelExecution(this, scheduler, node_stack)),
execution_number(1),
- diverge(NULL),
- earliest_diverge(NULL),
trace_analyses(),
inspect_plugin(NULL)
{
delete scheduler;
}
+/** Set the model-checker parameters and propagate them to the execution. */
+void ModelChecker::setParams(struct model_params params) {
+ this->params = params;
+ execution->setParams(¶ms);
+}
+
/**
* Restores user program to initial state and resets all model-checker data
* structures.
* those pending actions which were NOT pending before the rollback
* point
*/
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
delete get_thread(int_to_id(i))->get_pending();
snapshot_backtrack_before(0);
*/
Thread * ModelChecker::get_next_thread()
{
- thread_id_t tid;
/*
* Have we completed exploring the preselected path? Then let the
* scheduler decide
*/
- if (diverge == NULL) // W: diverge is set to NULL permanently
- return scheduler->select_next_thread(node_stack->get_head());
-
- /* Else, we are trying to replay an execution */
- ModelAction *next = node_stack->get_next()->get_action();
-
- if (next == diverge) {
- if (earliest_diverge == NULL || *diverge < *earliest_diverge)
- earliest_diverge = diverge;
-
- Node *nextnode = next->get_node();
- Node *prevnode = nextnode->get_parent();
- scheduler->update_sleep_set(prevnode);
-
- /* Reached divergence point */
- if (nextnode->increment_behaviors()) {
- /* Execute the same thread with a new behavior */
- tid = next->get_tid();
- node_stack->pop_restofstack(2);
- } else {
- ASSERT(prevnode);
- /* Make a different thread execute for next step */
- scheduler->add_sleep(get_thread(next->get_tid()));
- tid = prevnode->get_next_backtrack();
- /* Make sure the backtracked thread isn't sleeping. */
- node_stack->pop_restofstack(1);
- if (diverge == earliest_diverge) {
- earliest_diverge = prevnode->get_action();
- }
- }
- /* Start the round robin scheduler from this thread id */
- scheduler->set_scheduler_thread(tid);
- /* The correct sleep set is in the parent node. */
- execute_sleep_set();
-
- DEBUG("*** Divergence point ***\n");
-
- diverge = NULL;
- } else {
- tid = next->get_tid();
- }
- DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
- ASSERT(tid != THREAD_ID_T_NONE);
- return get_thread(id_to_int(tid));
-}
-
-/**
- * We need to know what the next actions of all threads in the sleep
- * set will be. This method computes them and stores the actions at
- * the corresponding thread object's pending action.
- */
-void ModelChecker::execute_sleep_set()
-{
- for (unsigned int i = 0; i < get_num_threads(); i++) {
- thread_id_t tid = int_to_id(i);
- Thread *thr = get_thread(tid);
- if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
- thr->get_pending()->set_sleep_flag();
- }
- }
+ return scheduler->select_next_thread(node_stack->get_head());
}
/**
SnapVector<bug_message *> *bugs = execution->get_bugs();
model_print("Bug report: %zu bug%s detected\n",
- bugs->size(),
- bugs->size() > 1 ? "s" : "");
- for (unsigned int i = 0; i < bugs->size(); i++)
+ bugs->size(),
+ bugs->size() > 1 ? "s" : "");
+ for (unsigned int i = 0;i < bugs->size();i++)
(*bugs)[i]->print();
}
model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
model_print("Total executions: %d\n", stats.num_total);
- if (params.verbose)
- model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
}
/**
void ModelChecker::print_execution(bool printbugs) const
{
model_print("Program output from execution %d:\n",
- get_execution_number());
+ get_execution_number());
print_program_output();
if (params.verbose >= 3) {
- model_print("\nEarliest divergence point since last feasible execution:\n");
- if (earliest_diverge)
- earliest_diverge->print();
- else
- model_print("(Not set)\n");
-
- model_print("\n");
print_stats();
}
DBG();
/* Is this execution a feasible execution that's worth bug-checking? */
bool complete = execution->isfeasibleprefix() &&
- (execution->is_complete_execution() ||
- execution->have_bug_reports());
+ (execution->is_complete_execution() ||
+ execution->have_bug_reports());
/* End-of-execution bug checks */
if (complete) {
checkDataRaces();
run_trace_analyses();
- }
+ }
record_stats();
/* Output */
// test code
execution_number++;
reset_to_initial_state();
- node_stack->pop_restofstack(2);
-// node_stack->full_reset();
- diverge = NULL;
+ node_stack->full_reset();
return false;
-/* test
- if (complete)
- earliest_diverge = NULL;
-
- if (exit_flag)
- return false;
-
-// diverge = execution->get_next_backtrack();
- if (diverge == NULL) {
- execution_number++;
- reset_to_initial_state();
- model_print("Does not diverge\n");
- return false;
- }
-
- if (DBG_ENABLED()) {
- model_print("Next execution will diverge at:\n");
- diverge->print();
- }
-
- execution_number++;
-
- if (params.maxexecutions != 0 && stats.num_complete >= params.maxexecutions)
- return false;
-
- reset_to_initial_state();
- return true;
-*/
-
}
/** @brief Run trace analyses on complete trace */
void ModelChecker::run_trace_analyses() {
- for (unsigned int i = 0; i < trace_analyses.size(); i++)
+ for (unsigned int i = 0;i < trace_analyses.size();i++)
trace_analyses[i]->analyze(execution->get_action_trace());
}
scheduler->set_current_thread(NULL);
ASSERT(!old->get_pending());
/* W: No plugin
- if (inspect_plugin != NULL) {
- inspect_plugin->inspectModelAction(act);
- }*/
+ if (inspect_plugin != NULL) {
+ inspect_plugin->inspectModelAction(act);
+ }*/
old->set_pending(act);
if (Thread::swap(old, &system_context) < 0) {
perror("swap threads");
execution->set_assert();
return true;
}
-
- if (execution->too_many_steps())
- return true;
return false;
}
-/** @brief Exit ModelChecker upon returning to the run loop of the
- * model checker. */
-void ModelChecker::exit_model_checker()
-{
- exit_flag = true;
-}
-
/** @brief Restart ModelChecker upon returning to the run loop of the
* model checker. */
void ModelChecker::restart()
void ModelChecker::do_restart()
{
restart_flag = false;
- diverge = NULL;
- earliest_diverge = NULL;
reset_to_initial_state();
node_stack->full_reset();
memset(&stats,0,sizeof(struct execution_stats));
/** @brief Run ModelChecker for the user program */
void ModelChecker::run()
{
- bool has_next;
- int i = 0;
- do {
+	//Need to initialize random number generator state to avoid resets on rollback
+ char random_state[256];
+ initstate(423121, random_state, sizeof(random_state));
+
+ for(int exec = 0;exec < params.maxexecutions;exec++) {
thrd_t user_thread;
- Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
+ Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
execution->add_thread(t);
+	//Need to seed the random number generator, otherwise its state gets reset
do {
/*
* Stash next pending action(s) for thread(s). There
* for any newly-created thread
*/
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
Thread *thr = get_thread(tid);
if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
- switch_from_master(thr); // L: context swapped, and action type of thr changed.
+ switch_from_master(thr); // L: context swapped, and action type of thr changed.
if (thr->is_waiting_on(thr))
assert_bug("Deadlock detected (thread %u)", i);
}
}
/* Don't schedule threads which should be disabled */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *th = get_thread(int_to_id(i));
ModelAction *act = th->get_pending();
if (act && execution->is_enabled(th) && !execution->check_action_enabled(act)) {
}
}
- for (unsigned int i = 1; i < get_num_threads(); i++) {
+ for (unsigned int i = 1;i < get_num_threads();i++) {
Thread *th = get_thread(int_to_id(i));
ModelAction *act = th->get_pending();
- if (act && execution->is_enabled(th) && (th->get_state() != THREAD_BLOCKED) ){
- if (act->is_write()){
- std::memory_order order = act->get_mo();
- if (order == std::memory_order_relaxed || \
- order == std::memory_order_release) {
+ if (act && execution->is_enabled(th) && (th->get_state() != THREAD_BLOCKED) ) {
+ if (act->is_write()) {
+ std::memory_order order = act->get_mo();
+ if (order == std::memory_order_relaxed || \
+ order == std::memory_order_release) {
t = th;
break;
}
} else if (act->get_type() == THREAD_CREATE || \
- act->get_type() == PTHREAD_CREATE || \
- act->get_type() == THREAD_START || \
- act->get_type() == THREAD_FINISH) {
+ act->get_type() == PTHREAD_CREATE || \
+ act->get_type() == THREAD_START || \
+ act->get_type() == THREAD_FINISH) {
t = th;
break;
- }
+ }
}
}
/* Catch assertions from prior take_step or from
- * between-ModelAction bugs (e.g., data races) */
+ * between-ModelAction bugs (e.g., data races) */
if (execution->has_asserted())
break;
- if (!t)
+ if (!t)
t = get_next_thread();
if (!t || t->is_model_thread())
break;
t->set_pending(NULL);
t = execution->take_step(curr);
} while (!should_terminate_execution());
-
- has_next = next_execution();
- i++;
- } while (i<2); // while (has_next);
-
- execution->fixup_release_sequences();
+ next_execution();
+ //restore random number generator state after rollback
+ setstate(random_state);
+ }
model_print("******* Model-checking complete: *******\n");
print_stats();
/* Have the trace analyses dump their output. */
- for (unsigned int i = 0; i < trace_analyses.size(); i++)
+ for (unsigned int i = 0;i < trace_analyses.size();i++)
trace_analyses[i]->finish();
}
#include "stl-model.h"
#include "context.h"
#include "params.h"
-
-/* Forward declaration */
-class Node;
-class NodeStack;
-class CycleGraph;
-class Promise;
-class Scheduler;
-class Thread;
-class ClockVector;
-class TraceAnalysis;
-class ModelExecution;
-class ModelAction;
+#include "classlist.h"
typedef SnapList<ModelAction *> action_list_t;
/** @brief Model checker execution stats */
struct execution_stats {
- int num_total; /**< @brief Total number of executions */
- int num_infeasible; /**< @brief Number of infeasible executions */
- int num_buggy_executions; /** @brief Number of buggy executions */
- int num_complete; /**< @brief Number of feasible, non-buggy, complete executions */
- int num_redundant; /**< @brief Number of redundant, aborted executions */
+ int num_total; /**< @brief Total number of executions */
+ int num_infeasible; /**< @brief Number of infeasible executions */
+ int num_buggy_executions; /** @brief Number of buggy executions */
+ int num_complete; /**< @brief Number of feasible, non-buggy, complete executions */
+ int num_redundant; /**< @brief Number of redundant, aborted executions */
};
/** @brief The central structure for model-checking */
class ModelChecker {
public:
- ModelChecker(struct model_params params);
+ ModelChecker();
~ModelChecker();
-
+ void setParams(struct model_params params);
void run();
/** Restart the model checker, intended for pluggins. */
/** Exit the model checker, intended for pluggins. */
void exit_model_checker();
- /** Check the exit_flag. */
- bool get_exit_flag() const { return exit_flag; }
-
/** @returns the context for the main model-checking system thread */
ucontext_t * get_system_context() { return &system_context; }
bool assert_bug(const char *msg, ...);
void assert_user_bug(const char *msg);
- const model_params params;
- void add_trace_analysis(TraceAnalysis *a) { trace_analyses.push_back(a); }
- void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
+ model_params params;
+ void add_trace_analysis(TraceAnalysis *a) { trace_analyses.push_back(a); }
+ void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
MEMALLOC
private:
/** Flag indicates whether to restart the model checker. */
bool restart_flag;
- /** Flag indicates whether to exit the model checker. */
- bool exit_flag;
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
unsigned int get_num_threads() const;
- void execute_sleep_set();
-
bool next_execution();
bool should_terminate_execution();
Thread * get_next_thread();
void reset_to_initial_state();
- ModelAction *diverge;
- ModelAction *earliest_diverge;
-
ucontext_t system_context;
ModelVector<TraceAnalysis *> trace_analyses;
extern ModelChecker *model;
-#endif /* __MODEL_H__ */
+#endif /* __MODEL_H__ */
state.alloc_tid = tid;
state.alloc_clock = model->get_execution()->get_cv(tid)->getClock(tid);
}
-
+
void mutex::lock()
{
model->switch_to_master(new ModelAction(ATOMIC_LOCK, std::memory_order_seq_cst, this));
}
-
+
bool mutex::try_lock()
{
return model->switch_to_master(new ModelAction(ATOMIC_TRYLOCK, std::memory_order_seq_cst, this));
free(p);
}
-#else /* !USE_MPROTECT_SNAPSHOT */
+#else /* !USE_MPROTECT_SNAPSHOT */
/** @brief Snapshotting allocation function for use by the Thread class only */
void * Thread_malloc(size_t size)
free(ptr);
}
-#endif /* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
void operator delete[](void *p, size_t size) { \
model_free(p); \
} \
- void * operator new(size_t size, void *p) { /* placement new */ \
+ void * operator new(size_t size, void *p) { /* placement new */ \
return p; \
}
void operator delete[](void *p, size_t size) { \
snapshot_free(p); \
} \
- void * operator new(size_t size, void *p) { /* placement new */ \
+ void * operator new(size_t size, void *p) { /* placement new */ \
return p; \
}
*/
template <class T>
class ModelAlloc {
- public:
+public:
// type definitions
- typedef T value_type;
+ typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
- typedef size_t size_type;
- typedef size_t difference_type;
+ typedef size_t size_type;
+ typedef size_t difference_type;
// rebind allocator to type U
template <class U>
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator ==(const ModelAlloc<T1>&,
- const ModelAlloc<T2>&) throw() {
+ const ModelAlloc<T2>&) throw() {
return true;
}
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator!= (const ModelAlloc<T1>&,
- const ModelAlloc<T2>&) throw() {
+ const ModelAlloc<T2>&) throw() {
return false;
}
*/
template <class T>
class SnapshotAlloc {
- public:
+public:
// type definitions
- typedef T value_type;
+ typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
- typedef size_t size_type;
- typedef size_t difference_type;
+ typedef size_t size_type;
+ typedef size_t difference_type;
// rebind allocator to type U
template <class U>
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator ==(const SnapshotAlloc<T1>&,
- const SnapshotAlloc<T2>&) throw() {
+ const SnapshotAlloc<T2>&) throw() {
return true;
}
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator!= (const SnapshotAlloc<T1>&,
- const SnapshotAlloc<T2>&) throw() {
+ const SnapshotAlloc<T2>&) throw() {
return false;
}
#ifdef __cplusplus
extern "C" {
#endif
- typedef void * mspace;
- extern void * mspace_malloc(mspace msp, size_t bytes);
- extern void mspace_free(mspace msp, void* mem);
- extern void * mspace_realloc(mspace msp, void* mem, size_t newsize);
- extern void * mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
- extern mspace create_mspace_with_base(void* base, size_t capacity, int locked);
- extern mspace create_mspace(size_t capacity, int locked);
+typedef void * mspace;
+extern void * mspace_malloc(mspace msp, size_t bytes);
+extern void mspace_free(mspace msp, void* mem);
+extern void * mspace_realloc(mspace msp, void* mem, size_t newsize);
+extern void * mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+extern mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+extern mspace create_mspace(size_t capacity, int locked);
#if USE_MPROTECT_SNAPSHOT
- extern mspace user_snapshot_space;
+extern mspace user_snapshot_space;
#endif
- extern mspace model_snapshot_space;
+extern mspace model_snapshot_space;
#ifdef __cplusplus
-}; /* end of extern "C" */
+}; /* end of extern "C" */
#endif
-#endif /* _MY_MEMORY_H */
+#endif /* _MY_MEMORY_H */
* as an empty stub, to represent the first thread "choice") and up to one
* parent.
*
- * @param params The model-checker parameters
* @param act The ModelAction to associate with this Node. May be NULL.
- * @param par The parent Node in the NodeStack. May be NULL if there is no
- * parent.
* @param nthreads The number of threads which exist at this point in the
* execution trace.
*/
-Node::Node(const struct model_params *params, ModelAction *act, Node *par,
- int nthreads, Node *prevfairness) :
- read_from_status(READ_FROM_PAST),
+Node::Node(ModelAction *act) :
action(act),
- params(params),
- uninit_action(NULL),
- parent(par),
- num_threads(nthreads),
- explored_children(num_threads),
- backtrack(num_threads),
- fairness(num_threads),
- numBacktracks(0),
- enabled_array(NULL),
- read_from_past(),
- read_from_past_idx(0),
- read_from_promises(),
- read_from_promise_idx(-1),
- future_values(),
- future_index(-1),
- resolve_promise(),
- resolve_promise_idx(-1),
- relseq_break_writes(),
- relseq_break_index(0),
- misc_index(0),
- misc_max(0),
- yield_data(NULL)
+ uninit_action(NULL)
{
ASSERT(act);
act->set_node(this);
- int currtid = id_to_int(act->get_tid());
- int prevtid = prevfairness ? id_to_int(prevfairness->action->get_tid()) : 0;
-
- if (get_params()->fairwindow != 0) {
- for (int i = 0; i < num_threads; i++) {
- ASSERT(i < ((int)fairness.size()));
- struct fairness_info *fi = &fairness[i];
- struct fairness_info *prevfi = (parent && i < parent->get_num_threads()) ? &parent->fairness[i] : NULL;
- if (prevfi) {
- *fi = *prevfi;
- }
- if (parent && parent->is_enabled(int_to_id(i))) {
- fi->enabled_count++;
- }
- if (i == currtid) {
- fi->turns++;
- fi->priority = false;
- }
- /* Do window processing */
- if (prevfairness != NULL) {
- if (prevfairness->parent->is_enabled(int_to_id(i)))
- fi->enabled_count--;
- if (i == prevtid) {
- fi->turns--;
- }
- /* Need full window to start evaluating
- * conditions
- * If we meet the enabled count and have no
- * turns, give us priority */
- if ((fi->enabled_count >= get_params()->enabledcount) &&
- (fi->turns == 0))
- fi->priority = true;
- }
- }
- }
-}
-
-int Node::get_yield_data(int tid1, int tid2) const {
- if (tid1<num_threads && tid2 < num_threads)
- return yield_data[YIELD_INDEX(tid1,tid2,num_threads)];
- else
- return YIELD_S | YIELD_D;
-}
-
-void Node::update_yield(Scheduler * scheduler) {
- if (yield_data==NULL)
- yield_data=(int *) model_calloc(1, sizeof(int)*num_threads*num_threads);
- //handle base case
- if (parent == NULL) {
- for(int i = 0; i < num_threads*num_threads; i++) {
- yield_data[i] = YIELD_S | YIELD_D;
- }
- return;
- }
- int curr_tid=id_to_int(action->get_tid());
-
- for(int u = 0; u < num_threads; u++) {
- for(int v = 0; v < num_threads; v++) {
- int yield_state=parent->get_yield_data(u, v);
- bool next_enabled=scheduler->is_enabled(int_to_id(v));
- bool curr_enabled=parent->is_enabled(int_to_id(v));
- if (!next_enabled) {
- //Compute intersection of ES and E
- yield_state&=~YIELD_E;
- //Check to see if we disabled the thread
- if (u==curr_tid && curr_enabled)
- yield_state|=YIELD_D;
- }
- yield_data[YIELD_INDEX(u, v, num_threads)]=yield_state;
- }
- yield_data[YIELD_INDEX(u, curr_tid, num_threads)]=(yield_data[YIELD_INDEX(u, curr_tid, num_threads)]&~YIELD_P)|YIELD_S;
- }
- //handle curr.yield(t) part of computation
- if (action->is_yield()) {
- for(int v = 0; v < num_threads; v++) {
- int yield_state=yield_data[YIELD_INDEX(curr_tid, v, num_threads)];
- if ((yield_state & (YIELD_E | YIELD_D)) && (!(yield_state & YIELD_S)))
- yield_state |= YIELD_P;
- yield_state &= YIELD_P;
- if (scheduler->is_enabled(int_to_id(v))) {
- yield_state|=YIELD_E;
- }
- yield_data[YIELD_INDEX(curr_tid, v, num_threads)]=yield_state;
- }
- }
}
/** @brief Node desctructor */
delete action;
if (uninit_action)
delete uninit_action;
- if (enabled_array)
- model_free(enabled_array);
- if (yield_data)
- model_free(yield_data);
}
/** Prints debugging info for the ModelAction associated with this Node */
void Node::print() const
{
action->print();
- model_print(" thread status: ");
- if (enabled_array) {
- for (int i = 0; i < num_threads; i++) {
- char str[20];
- enabled_type_to_string(enabled_array[i], str);
- model_print("[%d: %s]", i, str);
- }
- model_print("\n");
- } else
- model_print("(info not available)\n");
- model_print(" backtrack: %s", backtrack_empty() ? "empty" : "non-empty ");
- for (int i = 0; i < (int)backtrack.size(); i++)
- if (backtrack[i] == true)
- model_print("[%d]", i);
- model_print("\n");
-
- model_print(" read from past: %s", read_from_past_empty() ? "empty" : "non-empty ");
- for (int i = read_from_past_idx + 1; i < (int)read_from_past.size(); i++)
- model_print("[%d]", read_from_past[i]->get_seq_number());
- model_print("\n");
-
- model_print(" read-from promises: %s", read_from_promise_empty() ? "empty" : "non-empty ");
- for (int i = read_from_promise_idx + 1; i < (int)read_from_promises.size(); i++)
- model_print("[%d]", read_from_promises[i]->get_seq_number());
- model_print("\n");
-
- model_print(" future values: %s", future_value_empty() ? "empty" : "non-empty ");
- for (int i = future_index + 1; i < (int)future_values.size(); i++)
- model_print("[%#" PRIx64 "]", future_values[i].value);
- model_print("\n");
-
- model_print(" promises: %s\n", promise_empty() ? "empty" : "non-empty");
- model_print(" misc: %s\n", misc_empty() ? "empty" : "non-empty");
- model_print(" rel seq break: %s\n", relseq_break_empty() ? "empty" : "non-empty");
-}
-
-/****************************** threads backtracking **************************/
-
-/**
- * Checks if the Thread associated with this thread ID has been explored from
- * this Node already.
- * @param tid is the thread ID to check
- * @return true if this thread choice has been explored already, false
- * otherwise
- */
-bool Node::has_been_explored(thread_id_t tid) const
-{
- int id = id_to_int(tid);
- return explored_children[id];
-}
-
-/**
- * Checks if the backtracking set is empty.
- * @return true if the backtracking set is empty
- */
-bool Node::backtrack_empty() const
-{
- return (numBacktracks == 0);
-}
-
-void Node::explore(thread_id_t tid)
-{
- int i = id_to_int(tid);
- ASSERT(i < ((int)backtrack.size()));
- if (backtrack[i]) {
- backtrack[i] = false;
- numBacktracks--;
- }
- explored_children[i] = true;
-}
-
-/**
- * Mark the appropriate backtracking information for exploring a thread choice.
- * @param act The ModelAction to explore
- */
-void Node::explore_child(ModelAction *act, enabled_type_t *is_enabled)
-{
- if (!enabled_array)
- enabled_array = (enabled_type_t *)model_malloc(sizeof(enabled_type_t) * num_threads);
- if (is_enabled != NULL)
- memcpy(enabled_array, is_enabled, sizeof(enabled_type_t) * num_threads);
- else {
- for (int i = 0; i < num_threads; i++)
- enabled_array[i] = THREAD_DISABLED;
- }
-
- explore(act->get_tid());
-}
-
-/**
- * Records a backtracking reference for a thread choice within this Node.
- * Provides feedback as to whether this thread choice is already set for
- * backtracking.
- * @return false if the thread was already set to be backtracked, true
- * otherwise
- */
-bool Node::set_backtrack(thread_id_t id)
-{
- int i = id_to_int(id);
- ASSERT(i < ((int)backtrack.size()));
- if (backtrack[i])
- return false;
- backtrack[i] = true;
- numBacktracks++;
- return true;
-}
-
-thread_id_t Node::get_next_backtrack()
-{
- /** @todo Find next backtrack */
- unsigned int i;
- for (i = 0; i < backtrack.size(); i++)
- if (backtrack[i] == true)
- break;
- /* Backtrack set was empty? */
- ASSERT(i != backtrack.size());
-
- backtrack[i] = false;
- numBacktracks--;
- return int_to_id(i);
-}
-
-void Node::clear_backtracking()
-{
- for (unsigned int i = 0; i < backtrack.size(); i++)
- backtrack[i] = false;
- for (unsigned int i = 0; i < explored_children.size(); i++)
- explored_children[i] = false;
- numBacktracks = 0;
-}
-
-/************************** end threads backtracking **************************/
-
-/*********************************** promise **********************************/
-
-/**
- * Sets a promise to explore meeting with the given node.
- * @param i is the promise index.
- */
-void Node::set_promise(unsigned int i)
-{
- if (i >= resolve_promise.size())
- resolve_promise.resize(i + 1, false);
- resolve_promise[i] = true;
-}
-
-/**
- * Looks up whether a given promise should be satisfied by this node.
- * @param i The promise index.
- * @return true if the promise should be satisfied by the given ModelAction.
- */
-bool Node::get_promise(unsigned int i) const
-{
- return (i < resolve_promise.size()) && (int)i == resolve_promise_idx;
-}
-
-/**
- * Increments to the next promise to resolve.
- * @return true if we have a valid combination.
- */
-bool Node::increment_promise()
-{
- DBG();
- if (resolve_promise.empty())
- return false;
- int prev_idx = resolve_promise_idx;
- resolve_promise_idx++;
- for ( ; resolve_promise_idx < (int)resolve_promise.size(); resolve_promise_idx++)
- if (resolve_promise[resolve_promise_idx])
- return true;
- resolve_promise_idx = prev_idx;
- return false;
-}
-
-/**
- * Returns whether the promise set is empty.
- * @return true if we have explored all promise combinations.
- */
-bool Node::promise_empty() const
-{
- for (int i = resolve_promise_idx + 1; i < (int)resolve_promise.size(); i++)
- if (i >= 0 && resolve_promise[i])
- return false;
- return true;
-}
-
-/** @brief Clear any promise-resolution information for this Node */
-void Node::clear_promise_resolutions()
-{
- resolve_promise.clear();
- resolve_promise_idx = -1;
-}
-
-/******************************* end promise **********************************/
-
-void Node::set_misc_max(int i)
-{
- misc_max = i;
-}
-
-int Node::get_misc() const
-{
- return misc_index;
-}
-
-bool Node::increment_misc()
-{
- return (misc_index < misc_max) && ((++misc_index) < misc_max);
-}
-
-bool Node::misc_empty() const
-{
- return (misc_index + 1) >= misc_max;
-}
-
-bool Node::is_enabled(Thread *t) const
-{
- int thread_id = id_to_int(t->get_id());
- return thread_id < num_threads && (enabled_array[thread_id] != THREAD_DISABLED);
-}
-
-enabled_type_t Node::enabled_status(thread_id_t tid) const
-{
- int thread_id = id_to_int(tid);
- if (thread_id < num_threads)
- return enabled_array[thread_id];
- else
- return THREAD_DISABLED;
-}
-
-bool Node::is_enabled(thread_id_t tid) const
-{
- int thread_id = id_to_int(tid);
- return thread_id < num_threads && (enabled_array[thread_id] != THREAD_DISABLED);
-}
-
-bool Node::has_priority(thread_id_t tid) const
-{
- return fairness[id_to_int(tid)].priority;
-}
-
-bool Node::has_priority_over(thread_id_t tid1, thread_id_t tid2) const
-{
- return get_yield_data(id_to_int(tid1), id_to_int(tid2)) & YIELD_P;
-}
-
-/*********************************** read from ********************************/
-
-/**
- * Get the current state of the may-read-from set iteration
- * @return The read-from type we should currently be checking (past or future)
- */
-read_from_type_t Node::get_read_from_status()
-{
- if (read_from_status == READ_FROM_PAST && read_from_past.empty())
- increment_read_from();
- return read_from_status;
-}
-
-/**
- * Iterate one step in the may-read-from iteration. This includes a step in
- * reading from the either the past or the future.
- * @return True if there is a new read-from to explore; false otherwise
- */
-bool Node::increment_read_from()
-{
- clear_promise_resolutions();
- if (increment_read_from_past()) {
- read_from_status = READ_FROM_PAST;
- return true;
- } else if (increment_read_from_promise()) {
- read_from_status = READ_FROM_PROMISE;
- return true;
- } else if (increment_future_value()) {
- read_from_status = READ_FROM_FUTURE;
- return true;
- }
- read_from_status = READ_FROM_NONE;
- return false;
-}
-
-/**
- * @return True if there are any new read-froms to explore
- */
-bool Node::read_from_empty() const
-{
- return read_from_past_empty() &&
- read_from_promise_empty() &&
- future_value_empty();
-}
-
-/**
- * Get the total size of the may-read-from set, including both past and future
- * values
- * @return The size of may-read-from
- */
-unsigned int Node::read_from_size() const
-{
- return read_from_past.size() +
- read_from_promises.size() +
- future_values.size();
-}
-
-/******************************* end read from ********************************/
-
-/****************************** read from past ********************************/
-
-/** @brief Prints info about read_from_past set */
-void Node::print_read_from_past()
-{
- for (unsigned int i = 0; i < read_from_past.size(); i++)
- read_from_past[i]->print();
-}
-
-/**
- * Add an action to the read_from_past set.
- * @param act is the action to add
- */
-void Node::add_read_from_past(const ModelAction *act)
-{
- read_from_past.push_back(act);
-}
-
-/**
- * Gets the next 'read_from_past' action from this Node. Only valid for a node
- * where this->action is a 'read'.
- * @return The first element in read_from_past
- */
-const ModelAction * Node::get_read_from_past() const
-{
- if (read_from_past_idx < read_from_past.size()) {
- int random_index = rand() % read_from_past.size();
- return read_from_past[random_index];
- }
-// return read_from_past[read_from_past_idx];
- else
- return NULL;
-}
-
-const ModelAction * Node::get_read_from_past(int i) const
-{
- return read_from_past[i];
-}
-
-int Node::get_read_from_past_size() const
-{
- return read_from_past.size();
-}
-
-/**
- * Checks whether the readsfrom set for this node is empty.
- * @return true if the readsfrom set is empty.
- */
-bool Node::read_from_past_empty() const
-{
- return ((read_from_past_idx + 1) >= read_from_past.size());
-}
-
-/**
- * Increments the index into the readsfrom set to explore the next item.
- * @return Returns false if we have explored all items.
- */
-bool Node::increment_read_from_past()
-{
- DBG();
- if (read_from_past_idx < read_from_past.size()) {
- read_from_past_idx++;
- return read_from_past_idx < read_from_past.size();
- }
- return false;
-}
-
-/************************** end read from past ********************************/
-
-/***************************** read_from_promises *****************************/
-
-/**
- * Add an action to the read_from_promises set.
- * @param reader The read which generated the Promise; we use the ModelAction
- * instead of the Promise because the Promise does not last across executions
- */
-void Node::add_read_from_promise(const ModelAction *reader)
-{
- read_from_promises.push_back(reader);
-}
-
-/**
- * Gets the next 'read-from-promise' from this Node. Only valid for a node
- * where this->action is a 'read'.
- * @return The current element in read_from_promises
- */
-Promise * Node::get_read_from_promise() const
-{
- ASSERT(read_from_promise_idx >= 0 && read_from_promise_idx < ((int)read_from_promises.size()));
- return read_from_promises[read_from_promise_idx]->get_reads_from_promise();
-}
-
-/**
- * Gets a particular 'read-from-promise' form this Node. Only vlaid for a node
- * where this->action is a 'read'.
- * @param i The index of the Promise to get
- * @return The Promise at index i, if the Promise is still available; NULL
- * otherwise
- */
-Promise * Node::get_read_from_promise(int i) const
-{
- return read_from_promises[i]->get_reads_from_promise();
-}
-
-/** @return The size of the read-from-promise set */
-int Node::get_read_from_promise_size() const
-{
- return read_from_promises.size();
-}
-
-/**
- * Checks whether the read_from_promises set for this node is empty.
- * @return true if the read_from_promises set is empty.
- */
-bool Node::read_from_promise_empty() const
-{
- return ((read_from_promise_idx + 1) >= ((int)read_from_promises.size()));
-}
-
-/**
- * Increments the index into the read_from_promises set to explore the next item.
- * @return Returns false if we have explored all promises.
- */
-bool Node::increment_read_from_promise()
-{
- DBG();
- if (read_from_promise_idx < ((int)read_from_promises.size())) {
- read_from_promise_idx++;
- return (read_from_promise_idx < ((int)read_from_promises.size()));
- }
- return false;
-}
-
-/************************* end read_from_promises *****************************/
-
-/****************************** future values *********************************/
-
-/**
- * Adds a value from a weakly ordered future write to backtrack to. This
- * operation may "fail" if the future value has already been run (within some
- * sloppiness window of this expiration), or if the futurevalues set has
- * reached its maximum.
- * @see model_params.maxfuturevalues
- *
- * @param value is the value to backtrack to.
- * @return True if the future value was successully added; false otherwise
- */
-bool Node::add_future_value(struct future_value fv)
-{
- uint64_t value = fv.value;
- modelclock_t expiration = fv.expiration;
- thread_id_t tid = fv.tid;
- int idx = -1; /* Highest index where value is found */
- for (unsigned int i = 0; i < future_values.size(); i++) {
- if (future_values[i].value == value && future_values[i].tid == tid) {
- if (expiration <= future_values[i].expiration)
- return false;
- idx = i;
- }
- }
- if (idx > future_index) {
- /* Future value hasn't been explored; update expiration */
- future_values[idx].expiration = expiration;
- return true;
- } else if (idx >= 0 && expiration <= future_values[idx].expiration + get_params()->expireslop) {
- /* Future value has been explored and is within the "sloppy" window */
- return false;
- }
-
- /* Limit the size of the future-values set */
- if (get_params()->maxfuturevalues > 0 &&
- (int)future_values.size() >= get_params()->maxfuturevalues)
- return false;
-
- future_values.push_back(fv);
- return true;
-}
-
-/**
- * Gets the next 'future_value' from this Node. Only valid for a node where
- * this->action is a 'read'.
- * @return The first element in future_values
- */
-struct future_value Node::get_future_value() const
-{
- ASSERT(future_index >= 0 && future_index < ((int)future_values.size()));
- return future_values[future_index];
-}
-
-/**
- * Checks whether the future_values set for this node is empty.
- * @return true if the future_values set is empty.
- */
-bool Node::future_value_empty() const
-{
- return ((future_index + 1) >= ((int)future_values.size()));
-}
-
-/**
- * Increments the index into the future_values set to explore the next item.
- * @return Returns false if we have explored all values.
- */
-bool Node::increment_future_value()
-{
- DBG();
- if (future_index < ((int)future_values.size())) {
- future_index++;
- return (future_index < ((int)future_values.size()));
- }
- return false;
-}
-
-/************************** end future values *********************************/
-
-/*********************** breaking release sequences ***************************/
-
-/**
- * Add a write ModelAction to the set of writes that may break the release
- * sequence. This is used during replay exploration of pending release
- * sequences. This Node must correspond to a release sequence fixup action.
- *
- * @param write The write that may break the release sequence. NULL means we
- * allow the release sequence to synchronize.
- */
-void Node::add_relseq_break(const ModelAction *write)
-{
- relseq_break_writes.push_back(write);
-}
-
-/**
- * Get the write that may break the current pending release sequence,
- * according to the replay / divergence pattern.
- *
- * @return A write that may break the release sequence. If NULL, that means
- * the release sequence should not be broken.
- */
-const ModelAction * Node::get_relseq_break() const
-{
- if (relseq_break_index < (int)relseq_break_writes.size())
- return relseq_break_writes[relseq_break_index];
- else
- return NULL;
-}
-
-/**
- * Increments the index into the relseq_break_writes set to explore the next
- * item.
- * @return Returns false if we have explored all values.
- */
-bool Node::increment_relseq_break()
-{
- DBG();
- if (relseq_break_index < ((int)relseq_break_writes.size())) {
- relseq_break_index++;
- return (relseq_break_index < ((int)relseq_break_writes.size()));
- }
- return false;
-}
-
-/**
- * @return True if all writes that may break the release sequence have been
- * explored
- */
-bool Node::relseq_break_empty() const
-{
- return ((relseq_break_index + 1) >= ((int)relseq_break_writes.size()));
-}
-
-/******************* end breaking release sequences ***************************/
-
-/**
- * Increments some behavior's index, if a new behavior is available
- * @return True if there is a new behavior available; otherwise false
- */
-bool Node::increment_behaviors()
-{
- /* satisfy a different misc_index values */
- if (increment_misc())
- return true;
- /* satisfy a different set of promises */
- if (increment_promise())
- return true;
- /* read from a different value */
- if (increment_read_from())
- return true;
- /* resolve a release sequence differently */
- if (increment_relseq_break())
- return true;
- return false;
}
NodeStack::NodeStack() :
node_list(),
- head_idx(-1),
- total_nodes(0)
+ head_idx(-1)
{
- total_nodes++;
}
NodeStack::~NodeStack()
{
- for (unsigned int i = 0; i < node_list.size(); i++)
+ for (unsigned int i = 0;i < node_list.size();i++)
delete node_list[i];
}
{
model_print("............................................\n");
model_print("NodeStack printing node_list:\n");
- for (unsigned int it = 0; it < node_list.size(); it++) {
+ for (unsigned int it = 0;it < node_list.size();it++) {
if ((int)it == this->head_idx)
model_print("vvv following action is the current iterator vvv\n");
node_list[it]->print();
/** Note: The is_enabled set contains what actions were enabled when
* act was chosen. */
-ModelAction * NodeStack::explore_action(ModelAction *act, enabled_type_t *is_enabled)
+ModelAction * NodeStack::explore_action(ModelAction *act)
{
DBG();
- if ((head_idx + 1) < (int)node_list.size()) {
- head_idx++;
- return node_list[head_idx]->get_action();
- }
-
- /* Record action */
- Node *head = get_head();
- Node *prevfairness = NULL;
- if (head) {
- head->explore_child(act, is_enabled);
- if (get_params()->fairwindow != 0 && head_idx > (int)get_params()->fairwindow)
- prevfairness = node_list[head_idx - get_params()->fairwindow];
- }
-
- int next_threads = execution->get_num_threads();
- if (act->get_type() == THREAD_CREATE || act->get_type() == PTHREAD_CREATE ) // may need to be changed
- next_threads++;
- node_list.push_back(new Node(get_params(), act, head, next_threads, prevfairness));
- total_nodes++;
+ node_list.push_back(new Node(act));
head_idx++;
return NULL;
}
-/**
- * Empties the stack of all trailing nodes after a given position and calls the
- * destructor for each. This function is provided an offset which determines
- * how many nodes (relative to the current replay state) to save before popping
- * the stack.
- * @param numAhead gives the number of Nodes (including this Node) to skip over
- * before removing nodes.
- */
-void NodeStack::pop_restofstack(int numAhead)
-{
- /* Diverging from previous execution; clear out remainder of list */
- unsigned int it = head_idx + numAhead;
- for (unsigned int i = it; i < node_list.size(); i++)
- delete node_list[i];
- node_list.resize(it);
- node_list.back()->clear_backtracking();
-}
/** Reset the node stack. */
-void NodeStack::full_reset()
+void NodeStack::full_reset()
{
- for (unsigned int i = 0; i < node_list.size(); i++)
+ for (unsigned int i = 0;i < node_list.size();i++)
delete node_list[i];
node_list.clear();
reset_execution();
- total_nodes = 1;
}
Node * NodeStack::get_head() const
/** @file nodestack.h
* @brief Stack of operations for use in backtracking.
-*/
+ */
#ifndef __NODESTACK_H__
#define __NODESTACK_H__
#include "mymemory.h"
#include "schedule.h"
-#include "promise.h"
#include "stl-model.h"
-
-class ModelAction;
-class Thread;
-
-struct fairness_info {
- unsigned int enabled_count;
- unsigned int turns;
- bool priority;
-};
-
-/**
- * @brief Types of read-from relations
- *
- * Our "may-read-from" set is composed of multiple types of reads, and we have
- * to iterate through all of them in the backtracking search. This enumeration
- * helps to identify which type of read-from we are currently observing.
- */
-typedef enum {
- READ_FROM_PAST, /**< @brief Read from a prior, existing store */
- READ_FROM_PROMISE, /**< @brief Read from an existing promised future value */
- READ_FROM_FUTURE, /**< @brief Read from a newly-asserted future value */
- READ_FROM_NONE, /**< @brief A NULL state, which should not be reached */
-} read_from_type_t;
-
-#define YIELD_E 1
-#define YIELD_D 2
-#define YIELD_S 4
-#define YIELD_P 8
-#define YIELD_INDEX(tid1, tid2, num_threads) (tid1*num_threads+tid2)
-
+#include "classlist.h"
/**
* @brief A single node in a NodeStack
*/
class Node {
public:
- Node(const struct model_params *params, ModelAction *act, Node *par,
- int nthreads, Node *prevfairness);
+ Node(ModelAction *act);
~Node();
- /* return true = thread choice has already been explored */
- bool has_been_explored(thread_id_t tid) const;
- /* return true = backtrack set is empty */
- bool backtrack_empty() const;
-
- void clear_backtracking();
- void explore_child(ModelAction *act, enabled_type_t *is_enabled);
- /* return false = thread was already in backtrack */
- bool set_backtrack(thread_id_t id);
- thread_id_t get_next_backtrack();
- bool is_enabled(Thread *t) const;
- bool is_enabled(thread_id_t tid) const;
- enabled_type_t enabled_status(thread_id_t tid) const;
ModelAction * get_action() const { return action; }
void set_uninit_action(ModelAction *act) { uninit_action = act; }
ModelAction * get_uninit_action() const { return uninit_action; }
-
- bool has_priority(thread_id_t tid) const;
- void update_yield(Scheduler *);
- bool has_priority_over(thread_id_t tid, thread_id_t tid2) const;
- int get_num_threads() const { return num_threads; }
- /** @return the parent Node to this Node; that is, the action that
- * occurred previously in the stack. */
- Node * get_parent() const { return parent; }
-
- read_from_type_t get_read_from_status();
- bool increment_read_from();
- bool read_from_empty() const;
- unsigned int read_from_size() const;
-
- void print_read_from_past();
- void add_read_from_past(const ModelAction *act);
- const ModelAction * get_read_from_past() const;
- const ModelAction * get_read_from_past(int i) const;
- int get_read_from_past_size() const;
-
- void add_read_from_promise(const ModelAction *reader);
- Promise * get_read_from_promise() const;
- Promise * get_read_from_promise(int i) const;
- int get_read_from_promise_size() const;
-
- bool add_future_value(struct future_value fv);
- struct future_value get_future_value() const;
-
- void set_promise(unsigned int i);
- bool get_promise(unsigned int i) const;
- bool increment_promise();
- bool promise_empty() const;
- void clear_promise_resolutions();
-
- enabled_type_t *get_enabled_array() {return enabled_array;}
-
- void set_misc_max(int i);
- int get_misc() const;
- bool increment_misc();
- bool misc_empty() const;
-
- void add_relseq_break(const ModelAction *write);
- const ModelAction * get_relseq_break() const;
- bool increment_relseq_break();
- bool relseq_break_empty() const;
-
- bool increment_behaviors();
-
void print() const;
MEMALLOC
private:
- void explore(thread_id_t tid);
- int get_yield_data(int tid1, int tid2) const;
- bool read_from_past_empty() const;
- bool increment_read_from_past();
- bool read_from_promise_empty() const;
- bool increment_read_from_promise();
- bool future_value_empty() const;
- bool increment_future_value();
- read_from_type_t read_from_status;
- const struct model_params * get_params() const { return params; }
-
ModelAction * const action;
- const struct model_params * const params;
-
/** @brief ATOMIC_UNINIT action which was created at this Node */
ModelAction *uninit_action;
-
- Node * const parent;
- const int num_threads;
- ModelVector<bool> explored_children;
- ModelVector<bool> backtrack;
- ModelVector<struct fairness_info> fairness;
- int numBacktracks;
- enabled_type_t *enabled_array;
-
- /**
- * The set of past ModelActions that this the action at this Node may
- * read from. Only meaningful if this Node represents a 'read' action.
- */
- ModelVector<const ModelAction *> read_from_past;
- unsigned int read_from_past_idx;
-
- ModelVector<const ModelAction *> read_from_promises;
- int read_from_promise_idx;
-
- ModelVector<struct future_value> future_values;
- int future_index;
-
- ModelVector<bool> resolve_promise;
- int resolve_promise_idx;
-
- ModelVector<const ModelAction *> relseq_break_writes;
- int relseq_break_index;
-
- int misc_index;
- int misc_max;
- int * yield_data;
};
typedef ModelVector<Node *> node_list_t;
~NodeStack();
void register_engine(const ModelExecution *exec);
-
- ModelAction * explore_action(ModelAction *act, enabled_type_t * is_enabled);
+ ModelAction * explore_action(ModelAction *act);
Node * get_head() const;
Node * get_next() const;
void reset_execution();
- void pop_restofstack(int numAhead);
void full_reset();
- int get_total_nodes() { return total_nodes; }
-
void print() const;
MEMALLOC
private:
node_list_t node_list;
-
const struct model_params * get_params() const;
/** @brief The model-checker execution object */
* current head Node. It is negative when the list is empty.
*/
int head_idx;
-
- int total_nodes;
};
-#endif /* __NODESTACK_H__ */
+#endif /* __NODESTACK_H__ */
void redirect_output();
void clear_program_output();
void print_program_output();
-#endif /* ! CONFIG_DEBUG */
+#endif /* ! CONFIG_DEBUG */
-#endif /* __OUTPUT_H__ */
+#endif /* __OUTPUT_H__ */
* the model checker.
*/
struct model_params {
- int maxreads;
- int maxfuturedelay;
- bool yieldon;
- bool yieldblock;
- unsigned int fairwindow;
- unsigned int enabledcount;
- unsigned int bound;
unsigned int uninitvalue;
int maxexecutions;
- /** @brief Maximum number of future values that can be sent to the same
- * read */
- int maxfuturevalues;
-
- /** @brief Only generate a new future value/expiration pair if the
- * expiration time exceeds the existing one by more than the slop
- * value */
- unsigned int expireslop;
-
/** @brief Verbosity (0 = quiet; 1 = noisy; 2 = noisier) */
int verbose;
char **argv;
};
-#endif /* __PARAMS_H__ */
+#endif /* __PARAMS_H__ */
--- /dev/null
+#include "plugins.h"
+
+ModelVector<TraceAnalysis *> * registered_analysis;
+ModelVector<TraceAnalysis *> * installed_analysis;
+
+void register_plugins() {
+ registered_analysis=new ModelVector<TraceAnalysis *>();
+ installed_analysis=new ModelVector<TraceAnalysis *>();
+}
+
+ModelVector<TraceAnalysis *> * getRegisteredTraceAnalysis() {
+ return registered_analysis;
+}
+
+ModelVector<TraceAnalysis *> * getInstalledTraceAnalysis() {
+ return installed_analysis;
+}
--- /dev/null
+#ifndef PLUGINS_H
+#define PLUGINS_H
+#include "traceanalysis.h"
+#include "stl-model.h"
+
+void register_plugins();
+ModelVector<TraceAnalysis *> * getRegisteredTraceAnalysis();
+ModelVector<TraceAnalysis *> * getInstalledTraceAnalysis();
+
+#endif
+++ /dev/null
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-
-#include "promise.h"
-#include "execution.h"
-#include "schedule.h"
-#include "action.h"
-#include "threads-model.h"
-
-/**
- * @brief Promise constructor
- * @param execution The execution which is creating this Promise
- * @param read The read which reads from a promised future value
- * @param fv The future value that is promised
- */
-Promise::Promise(const ModelExecution *execution, ModelAction *read, struct future_value fv) :
- execution(execution),
- num_available_threads(0),
- num_was_available_threads(0),
- fv(fv),
- readers(1, read),
- write(NULL)
-{
- add_thread(fv.tid);
- eliminate_thread(read->get_tid());
-}
-
-/**
- * Add a reader that reads from this Promise. Must be added in an order
- * consistent with execution order.
- *
- * @param reader The ModelAction that reads from this promise. Must be a read.
- * @return True if this new reader has invalidated the promise; false otherwise
- */
-bool Promise::add_reader(ModelAction *reader)
-{
- readers.push_back(reader);
- return eliminate_thread(reader->get_tid());
-}
-
-/**
- * Access a reader that read from this Promise. Readers must be inserted in
- * order by execution order, so they can be returned in this order.
- *
- * @param i The index of the reader to return
- * @return The i'th reader of this Promise
- */
-ModelAction * Promise::get_reader(unsigned int i) const
-{
- return i < readers.size() ? readers[i] : NULL;
-}
-
-/**
- * Eliminate a thread which no longer can satisfy this promise. Once all
- * enabled threads have been eliminated, this promise is unresolvable.
- *
- * @param tid The thread ID of the thread to eliminate
- * @return True, if this elimination has invalidated the promise; false
- * otherwise
- */
-bool Promise::eliminate_thread(thread_id_t tid)
-{
- unsigned int id = id_to_int(tid);
- if (!thread_is_available(tid))
- return false;
-
- available_thread[id] = false;
- num_available_threads--;
- return has_failed();
-}
-
-/**
- * Add a thread which may resolve this promise
- *
- * @param tid The thread ID
- */
-void Promise::add_thread(thread_id_t tid)
-{
- unsigned int id = id_to_int(tid);
- if (id >= available_thread.size())
- available_thread.resize(id + 1, false);
- if (!available_thread[id]) {
- available_thread[id] = true;
- num_available_threads++;
- }
- if (id >= was_available_thread.size())
- was_available_thread.resize(id + 1, false);
- if (!was_available_thread[id]) {
- was_available_thread[id] = true;
- num_was_available_threads++;
- }
-}
-
-/**
- * Check if a thread is available for resolving this promise. That is, the
- * thread must have been previously marked for resolving this promise, and it
- * cannot have been eliminated due to synchronization, etc.
- *
- * @param tid Thread ID of the thread to check
- * @return True if the thread is available; false otherwise
- */
-bool Promise::thread_is_available(thread_id_t tid) const
-{
- unsigned int id = id_to_int(tid);
- if (id >= available_thread.size())
- return false;
- return available_thread[id];
-}
-
-bool Promise::thread_was_available(thread_id_t tid) const
-{
- unsigned int id = id_to_int(tid);
- if (id >= was_available_thread.size())
- return false;
- return was_available_thread[id];
-}
-
-/**
- * @brief Get an upper bound on the number of available threads
- *
- * Gets an upper bound on the number of threads in the available threads set,
- * useful for iterating over "thread_is_available()".
- *
- * @return The upper bound
- */
-unsigned int Promise::max_available_thread_idx() const
-{
- return available_thread.size();
-}
-
-/** @brief Print debug info about the Promise */
-void Promise::print() const
-{
- model_print("Promised value %#" PRIx64 ", first read from thread %d, available threads to resolve: ",
- fv.value, id_to_int(get_reader(0)->get_tid()));
- bool failed = true;
- for (unsigned int i = 0; i < available_thread.size(); i++)
- if (available_thread[i]) {
- model_print("[%d]", i);
- failed = false;
- }
- if (failed)
- model_print("(none)");
- model_print("\n");
-}
-
-/**
- * Check if this promise has failed. A promise can fail when all threads which
- * could possibly satisfy the promise have been eliminated.
- *
- * @return True, if this promise has failed; false otherwise
- */
-bool Promise::has_failed() const
-{
- return num_available_threads == 0;
-}
-
-/**
- * @brief Check if an action's thread and location are compatible for resolving
- * this promise
- * @param act The action to check against
- * @return True if we are compatible; false otherwise
- */
-bool Promise::is_compatible(const ModelAction *act) const
-{
- return thread_is_available(act->get_tid()) && get_reader(0)->same_var(act);
-}
-
-/**
- * @brief Check if an action's thread and location are compatible for resolving
- * this promise, and that the promise is thread-exclusive
- * @param act The action to check against
- * @return True if we are compatible and exclusive; false otherwise
- */
-bool Promise::is_compatible_exclusive(const ModelAction *act) const
-{
- return get_num_available_threads() == 1 && is_compatible(act);
-}
-
-/**
- * @brief Check if a store's value matches this Promise
- * @param write The store to check
- * @return True if the store's written value matches this Promise
- */
-bool Promise::same_value(const ModelAction *write) const
-{
- return get_value() == write->get_write_value();
-}
-
-/**
- * @brief Check if a ModelAction's location matches this Promise
- * @param act The ModelAction to check
- * @return True if the action's location matches this Promise
- */
-bool Promise::same_location(const ModelAction *act) const
-{
- return get_reader(0)->same_var(act);
-}
-
-/** @brief Get this Promise's index within the execution's promise array */
-int Promise::get_index() const
-{
- return execution->get_promise_number(this);
-}
+++ /dev/null
-/** @file promise.h
- *
- * @brief Promise class --- tracks future obligations for execution
- * related to weakly ordered writes.
- */
-
-#ifndef __PROMISE_H__
-#define __PROMISE_H__
-
-#include <inttypes.h>
-
-#include "modeltypes.h"
-#include "mymemory.h"
-#include "stl-model.h"
-
-class ModelAction;
-class ModelExecution;
-
-struct future_value {
- uint64_t value;
- modelclock_t expiration;
- thread_id_t tid;
-};
-
-class Promise {
- public:
- Promise(const ModelExecution *execution, ModelAction *read, struct future_value fv);
- bool add_reader(ModelAction *reader);
- ModelAction * get_reader(unsigned int i) const;
- unsigned int get_num_readers() const { return readers.size(); }
- bool eliminate_thread(thread_id_t tid);
- void add_thread(thread_id_t tid);
- bool thread_is_available(thread_id_t tid) const;
- bool thread_was_available(thread_id_t tid) const;
- unsigned int max_available_thread_idx() const;
- bool has_failed() const;
- void set_write(const ModelAction *act) { write = act; }
- const ModelAction * get_write() const { return write; }
- int get_num_available_threads() const { return num_available_threads; }
- int get_num_was_available_threads() const { return num_was_available_threads; }
- bool is_compatible(const ModelAction *act) const;
- bool is_compatible_exclusive(const ModelAction *act) const;
- bool same_value(const ModelAction *write) const;
- bool same_location(const ModelAction *act) const;
-
- modelclock_t get_expiration() const { return fv.expiration; }
- uint64_t get_value() const { return fv.value; }
- struct future_value get_fv() const { return fv; }
-
- int get_index() const;
-
- void print() const;
-
- bool equals(const Promise *x) const { return this == x; }
- bool equals(const ModelAction *x) const { return false; }
-
- SNAPSHOTALLOC
- private:
- /** @brief The execution which created this Promise */
- const ModelExecution *execution;
-
- /** @brief Thread ID(s) for thread(s) that potentially can satisfy this
- * promise */
- SnapVector<bool> available_thread;
- SnapVector<bool> was_available_thread;
-
- int num_available_threads;
- int num_was_available_threads;
-
- const future_value fv;
-
- /** @brief The action(s) which read the promised future value */
- SnapVector<ModelAction *> readers;
-
- const ModelAction *write;
-};
-
-#endif
#include "common.h"
#include "threads-model.h"
#include "action.h"
-#include "pthread.h"
+#include "mypthread.h"
#include "snapshot-interface.h"
#include "datarace.h"
#include "model.h"
#include "execution.h"
-static void param_defaults(struct model_params *params)
-{
- params->maxreads = 0;
- params->maxfuturedelay = 6;
- params->fairwindow = 0;
- params->yieldon = false;
- params->yieldblock = false;
- params->enabledcount = 1;
- params->bound = 0;
- params->maxfuturevalues = 0;
- params->expireslop = 4;
- params->verbose = !!DBG_ENABLED();
- params->uninitvalue = 0;
- params->maxexecutions = 0;
-}
-
-static void model_main()
-{
- struct model_params params;
-
- param_defaults(¶ms);
-
- //parse_options(¶ms, main_argc, main_argv);
-
- //Initialize race detector
- initRaceDetector();
-
- snapshot_stack_init();
-
- model = new ModelChecker(params); // L: Model thread is created
-// install_trace_analyses(model->get_execution()); L: disable plugin
-
- snapshot_record(0);
- model->run();
- delete model;
-
- DEBUG("Exiting\n");
-}
-
int pthread_create(pthread_t *t, const pthread_attr_t * attr,
- pthread_start_t start_routine, void * arg) {
+ pthread_start_t start_routine, void * arg) {
struct pthread_params params = { start_routine, arg };
ModelAction *act = new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)¶ms);
// store return value
void *rtval = th->get_pthread_return();
*value_ptr = rtval;
- }
+ }
return 0;
}
void pthread_exit(void *value_ptr) {
Thread * th = thread_current();
model->switch_to_master(new ModelAction(THREAD_FINISH, std::memory_order_seq_cst, th));
+ while(1) ;//make warning go away
}
int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
if (!model) {
- snapshot_system_init(10000, 1024, 1024, 40000, &model_main);
+ model = new ModelChecker();
}
cdsc::mutex *m = new cdsc::mutex();
ModelExecution *execution = model->get_execution();
- execution->mutex_map.put(p_mutex, m);
+ execution->getMutexMap()->put(p_mutex, m);
return 0;
}
int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
ModelExecution *execution = model->get_execution();
- /* to protect the case where PTHREAD_MUTEX_INITIALIZER is used
+ /* to protect the case where PTHREAD_MUTEX_INITIALIZER is used
instead of pthread_mutex_init, or where *p_mutex is not stored
in the execution->mutex_map for some reason. */
- if (!execution->mutex_map.contains(p_mutex)) {
+ if (!execution->getMutexMap()->contains(p_mutex)) {
pthread_mutex_init(p_mutex, NULL);
}
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+ cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
if (m != NULL) {
m->lock();
int pthread_mutex_trylock(pthread_mutex_t *p_mutex) {
ModelExecution *execution = model->get_execution();
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+ cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
return m->try_lock();
}
-int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
+int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
ModelExecution *execution = model->get_execution();
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+ cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
if (m != NULL) {
m->unlock();
}
int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
- const struct timespec *__restrict abstime) {
+ const struct timespec *__restrict abstime) {
// timedlock just gives the option of giving up the lock, so return and let the scheduler decide which thread goes next
/*
- ModelExecution *execution = model->get_execution();
- if (!execution->mutex_map.contains(p_mutex)) {
- pthread_mutex_init(p_mutex, NULL);
- }
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
-
- if (m != NULL) {
- m->lock();
- } else {
- printf("something is wrong with pthread_mutex_timedlock\n");
- }
-
- printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation without no timeout\n");
-*/
+ ModelExecution *execution = model->get_execution();
+ if (!execution->mutex_map.contains(p_mutex)) {
+ pthread_mutex_init(p_mutex, NULL);
+ }
+ cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+
+ if (m != NULL) {
+ m->lock();
+ } else {
+ printf("something is wrong with pthread_mutex_timedlock\n");
+ }
+
+ printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation without a timeout\n");
+ */
return 0;
}
pthread_t pthread_self() {
Thread* th = model->get_current_thread();
- return th->get_id();
+ return (pthread_t)th->get_id();
}
int pthread_key_delete(pthread_key_t) {
cdsc::condition_variable *v = new cdsc::condition_variable();
ModelExecution *execution = model->get_execution();
- execution->cond_map.put(p_cond, v);
+ execution->getCondMap()->put(p_cond, v);
return 0;
}
int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex) {
ModelExecution *execution = model->get_execution();
- if ( !execution->cond_map.contains(p_cond) )
+ if ( !execution->getCondMap()->contains(p_cond) )
pthread_cond_init(p_cond, NULL);
- cdsc::condition_variable *v = execution->cond_map.get(p_cond);
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+ cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
+ cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
v->wait(*m);
return 0;
}
-int pthread_cond_timedwait(pthread_cond_t *p_cond,
- pthread_mutex_t *p_mutex, const struct timespec *abstime) {
+int pthread_cond_timedwait(pthread_cond_t *p_cond,
+ pthread_mutex_t *p_mutex, const struct timespec *abstime) {
// implement cond_timedwait as a noop and let the scheduler decide which thread goes next
ModelExecution *execution = model->get_execution();
- if ( !execution->cond_map.contains(p_cond) )
+ if ( !execution->getCondMap()->contains(p_cond) )
pthread_cond_init(p_cond, NULL);
- if ( !execution->mutex_map.contains(p_mutex) )
+ if ( !execution->getMutexMap()->contains(p_mutex) )
pthread_mutex_init(p_mutex, NULL);
- cdsc::condition_variable *v = execution->cond_map.get(p_cond);
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+ cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
+ cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
- model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v, NULL));
+ model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
// v->wait(*m);
// printf("timed_wait called\n");
return 0;
int pthread_cond_signal(pthread_cond_t *p_cond) {
// notify only one blocked thread
ModelExecution *execution = model->get_execution();
- if ( !execution->cond_map.contains(p_cond) )
+ if ( !execution->getCondMap()->contains(p_cond) )
pthread_cond_init(p_cond, NULL);
- cdsc::condition_variable *v = execution->cond_map.get(p_cond);
+ cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
v->notify_one();
return 0;
#include "model.h"
#include "nodestack.h"
#include "execution.h"
+#include "fuzzer.h"
/**
* Format an "enabled_type_t" for printing
enabled_len = threadid + 1;
}
enabled[threadid] = enabled_status;
- if (enabled_status == THREAD_DISABLED)
- execution->check_promises_thread_disabled();
}
/**
bool Scheduler::all_threads_sleeping() const
{
bool sleeping = false;
- for (int i = 0; i < enabled_len; i++)
+ for (int i = 0;i < enabled_len;i++)
if (enabled[i] == THREAD_ENABLED)
return false;
else if (enabled[i] == THREAD_SLEEP_SET)
return enabled[id];
}
-void Scheduler::update_sleep_set(Node *n) {
- enabled_type_t *enabled_array = n->get_enabled_array();
- for (int i = 0; i < enabled_len; i++) {
- if (enabled_array[i] == THREAD_SLEEP_SET) {
- enabled[i] = THREAD_SLEEP_SET;
- }
- }
-}
-
/**
* Add a Thread to the sleep set.
* @param t The Thread to add
*/
Thread * Scheduler::select_next_thread(Node *n)
{
- bool have_enabled_thread_with_priority = false;
- if (model->params.fairwindow != 0) {
- for (int i = 0; i < enabled_len; i++) {
- thread_id_t tid = int_to_id(i);
- if (n->has_priority(tid)) {
- DEBUG("Node (tid %d) has priority\n", i);
- if (enabled[i] != THREAD_DISABLED)
- have_enabled_thread_with_priority = true;
- }
- }
- }
-
- int avail_threads = enabled_len; // number of available threads
- int thread_list[enabled_len]; // keep a list of threads to select from
- for (int i = 0; i< enabled_len; i++){
- thread_list[i] = i;
+ int avail_threads = 0;
+ int thread_list[enabled_len];
+ for (int i = 0;i< enabled_len;i++) {
+ if (enabled[i] == THREAD_ENABLED)
+ thread_list[avail_threads++] = i;
}
- while (avail_threads > 0) {
- int random_index = rand() % avail_threads;
- curr_thread_index = thread_list[random_index]; // randomly select a thread from available threads
-
- // curr_thread_index = (curr_thread_index + i + 1) % enabled_len;
- thread_id_t curr_tid = int_to_id(curr_thread_index);
- if (model->params.yieldon) {
- bool bad_thread = false;
- for (int j = 0; j < enabled_len; j++) {
- thread_id_t tother = int_to_id(j);
- if ((enabled[j] != THREAD_DISABLED) && n->has_priority_over(curr_tid, tother)) {
- bad_thread=true;
- break;
- }
- }
- if (bad_thread) {
- thread_list[random_index] = thread_list[avail_threads - 1]; // remove this threads from available threads
- avail_threads--;
+ if (avail_threads == 0)
+ return NULL;// No threads available
- continue;
- }
- }
-
- if (enabled[curr_thread_index] == THREAD_ENABLED &&
- (!have_enabled_thread_with_priority || n->has_priority(curr_tid))) {
- return model->get_thread(curr_tid);
- } else { // remove this threads from available threads
- thread_list[random_index] = thread_list[avail_threads - 1];
- avail_threads--;
- }
- }
-
- /* No thread was enabled */
- return NULL;
+ Thread * thread = execution->getFuzzer()->selectThread(n, thread_list, avail_threads);
+ curr_thread_index = id_to_int(thread->get_id());
+ return thread;
}
void Scheduler::set_scheduler_thread(thread_id_t tid) {
int curr_id = current ? id_to_int(current->get_id()) : -1;
model_print("Scheduler: ");
- for (int i = 0; i < enabled_len; i++) {
+ for (int i = 0;i < enabled_len;i++) {
char str[20];
enabled_type_to_string(enabled[i], str);
model_print("[%i: %s%s]", i, i == curr_id ? "current, " : "", str);
#include "mymemory.h"
#include "modeltypes.h"
-
-/* Forward declaration */
-class Thread;
-class Node;
-class ModelExecution;
+#include "classlist.h"
typedef enum enabled_type {
THREAD_DISABLED,
void remove_sleep(Thread *t);
void add_sleep(Thread *t);
enabled_type_t get_enabled(const Thread *t) const;
- void update_sleep_set(Node *n);
bool is_enabled(const Thread *t) const;
bool is_enabled(thread_id_t tid) const;
bool is_sleep_set(const Thread *t) const;
Thread *current;
};
-#endif /* __SCHEDULE_H__ */
+#endif /* __SCHEDULE_H__ */
};
class SnapshotStack {
- public:
+public:
int backTrackBeforeStep(int seq_index);
void snapshotStep(int seq_index);
MEMALLOC
- private:
+private:
ModelVector<struct snapshot_entry> stack;
};
sscanf(buf, "%22s %p-%p", type, &begin, &end);
char * secondpart = strstr(buf, "]");
-
+
sscanf(&secondpart[2], "%c%c%c/%c%c%c SM=%3s %200s\n", &r, &w, &x, &mr, &mw, &mx, smstr, regionname);
if (w == 'w' && strstr(regionname, MYBINARYNAME)) {
size_t len = ((uintptr_t)end - (uintptr_t)begin) / PAGESIZE;
int SnapshotStack::backTrackBeforeStep(int seqindex)
{
int i;
- for (i = (int)stack.size() - 1; i >= 0; i++)
+ for (i = (int)stack.size() - 1;i >= 0;i++)
if (stack[i].index <= seqindex)
break;
else
typedef void (*VoidFuncPtr)();
void snapshot_system_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint);
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint);
void snapshot_stack_init();
void snapshot_record(int seq_index);
/* Struct for each memory region */
struct MemoryRegion {
- void *basePtr; // base of memory region
- int sizeInPages; // size of memory region in pages
+ void *basePtr; // base of memory region
+ int sizeInPages; // size of memory region in pages
};
/** ReturnPageAlignedAddress returns a page aligned address for the
mprot_snapshotter(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions);
~mprot_snapshotter();
- struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
- snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
- void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
- struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore
- struct SnapShotRecord *snapShots; //This pointer references the snapshot array
+ struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
+ snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
+ void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
+ struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore)
+ struct SnapShotRecord *snapShots; //This pointer references the snapshot array
- unsigned int lastSnapShot; //Stores the next snapshot record we should use
- unsigned int lastBackingPage; //Stores the next backingpage we should use
- unsigned int lastRegion; //Stores the next memory region to be used
+ unsigned int lastSnapShot; //Stores the next snapshot record we should use
+ unsigned int lastBackingPage; //Stores the next backingpage we should use
+ unsigned int lastRegion; //Stores the next memory region to be used
- unsigned int maxRegions; //Stores the max number of memory regions we support
- unsigned int maxBackingPages; //Stores the total number of backing pages
- unsigned int maxSnapShots; //Stores the total number of snapshots we allow
+ unsigned int maxRegions; //Stores the max number of memory regions we support
+ unsigned int maxBackingPages; //Stores the total number of backing pages
+ unsigned int maxSnapShots; //Stores the total number of snapshots we allow
MEMALLOC
};
if (si->si_code == SEGV_MAPERR) {
model_print("Segmentation fault at %p\n", si->si_addr);
model_print("For debugging, place breakpoint at: %s:%d\n",
- __FILE__, __LINE__);
+ __FILE__, __LINE__);
// print_trace(); // Trace printing may cause dynamic memory allocation
- exit(EXIT_FAILURE);
+ exit(EXIT_FAILURE);
}
void* addr = ReturnPageAlignedAddress(si->si_addr);
- unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
+ unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
if (backingpage == mprot_snap->maxBackingPages) {
model_print("Out of backing pages at %p\n", si->si_addr);
exit(EXIT_FAILURE);
}
static void mprot_snapshot_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint)
{
/* Setup a stack for our signal handler.... */
stack_t ss;
memset(&si, 0, sizeof(si));
si.si_addr = ss.ss_sp;
mprot_handle_pf(SIGSEGV, &si, NULL);
- mprot_snap->lastBackingPage--; //remove the fake page we copied
+ mprot_snap->lastBackingPage--; //remove the fake page we copied
void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
void *pagealignedbase = PageAlignAddressUpward(basemySpace);
}
DEBUG("snapshot region %p-%p (%u page%s)\n",
- addr, (char *)addr + numPages * PAGESIZE, numPages,
- numPages > 1 ? "s" : "");
+ addr, (char *)addr + numPages * PAGESIZE, numPages,
+ numPages > 1 ? "s" : "");
mprot_snap->regionsToSnapShot[memoryregion].basePtr = addr;
mprot_snap->regionsToSnapShot[memoryregion].sizeInPages = numPages;
}
static snapshot_id mprot_take_snapshot()
{
- for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
+ for (unsigned int region = 0;region < mprot_snap->lastRegion;region++) {
if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
{
#if USE_MPROTECT_SNAPSHOT == 2
if (mprot_snap->lastSnapShot == (theID + 1)) {
- for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
+ for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage;page < mprot_snap->lastBackingPage;page++) {
memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
}
return;
#endif
HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
- for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
+ for (unsigned int region = 0;region < mprot_snap->lastRegion;region++) {
if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ | PROT_WRITE) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
exit(EXIT_FAILURE);
}
}
- for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
+ for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage;page < mprot_snap->lastBackingPage;page++) {
if (!duplicateMap.contains(mprot_snap->backingRecords[page].basePtrOfPage)) {
duplicateMap.put(mprot_snap->backingRecords[page].basePtrOfPage, true);
memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
}
mprot_snap->lastSnapShot = theID;
mprot_snap->lastBackingPage = mprot_snap->snapShots[theID].firstBackingPage;
- mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
+ mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
}
-#else /* !USE_MPROTECT_SNAPSHOT */
+#else /* !USE_MPROTECT_SNAPSHOT */
-#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100mb for the shared memory
-#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
+#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100mb for the shared memory
+#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
struct fork_snapshotter {
/** @brief Pointer to the shared (non-snapshot) memory heap base
static struct fork_snapshotter *fork_snap = NULL;
/** @statics
-* These variables are necessary because the stack is shared region and
-* there exists a race between all processes executing the same function.
-* To avoid the problem above, we require variables allocated in 'safe' regions.
-* The bug was actually observed with the forkID, these variables below are
-* used to indicate the various contexts to which to switch to.
-*
-* @private_ctxt: the context which is internal to the current process. Used
-* for running the internal snapshot/rollback loop.
-* @exit_ctxt: a special context used just for exiting from a process (so we
-* can use swapcontext() instead of setcontext() + hacks)
-* @snapshotid: it is a running counter for the various forked processes
-* snapshotid. it is incremented and set in a persistently shared record
-*/
+ * These variables are necessary because the stack is shared region and
+ * there exists a race between all processes executing the same function.
+ * To avoid the problem above, we require variables allocated in 'safe' regions.
+ * The bug was actually observed with the forkID, these variables below are
+ * used to indicate the various contexts to which to switch to.
+ *
+ * @private_ctxt: the context which is internal to the current process. Used
+ * for running the internal snapshot/rollback loop.
+ * @exit_ctxt: a special context used just for exiting from a process (so we
+ * can use swapcontext() instead of setcontext() + hacks)
+ * @snapshotid: it is a running counter for the various forked processes
+ * snapshotid. it is incremented and set in a persistently shared record
+ */
static ucontext_t private_ctxt;
static ucontext_t exit_ctxt;
static snapshot_id snapshotid = 0;
* @param func The entry point function for the context
*/
static void create_context(ucontext_t *ctxt, void *stack, size_t stacksize,
- void (*func)(void))
+ void (*func)(void))
{
getcontext(ctxt);
ctxt->uc_stack.ss_sp = stack;
}
static void fork_snapshot_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint)
{
if (!fork_snap)
createSharedMemory();
/* setup the shared-stack context */
create_context(&fork_snap->shared_ctxt, fork_snap->mStackBase,
- STACK_SIZE_DEFAULT, entryPoint);
+ STACK_SIZE_DEFAULT, entryPoint);
/* switch to a new entryPoint context, on a new stack */
model_swapcontext(&private_ctxt, &fork_snap->shared_ctxt);
setcontext(&fork_snap->shared_ctxt);
} else {
DEBUG("parent PID: %d, child PID: %d, snapshot ID: %d\n",
- getpid(), forkedID, snapshotid);
+ getpid(), forkedID, snapshotid);
while (waitpid(forkedID, NULL, 0) < 0) {
/* waitpid() may be interrupted */
fork_snap->mIDToRollback = -1;
}
-#endif /* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
/**
* @brief Initializes the snapshot system
* @param entryPoint the function that should run the program.
*/
void snapshot_system_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint)
{
#if USE_MPROTECT_SNAPSHOT
mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
// iterate over the returned symbol lines. skip the first, it is the
// address of this function.
- for (int i = 1; i < addrlen; i++) {
+ for (int i = 1;i < addrlen;i++) {
char *begin_name = 0, *begin_offset = 0, *end_offset = 0;
// find parentheses and +address offset surrounding the mangled name:
// ./module(function+0x15c) [0x8048a6d]
- for (char *p = symbollist[i]; *p; ++p) {
+ for (char *p = symbollist[i];*p;++p) {
if (*p == '(')
begin_name = p;
else if (*p == '+')
int status;
char* ret = abi::__cxa_demangle(begin_name,
- funcname, &funcnamesize, &status);
+ funcname, &funcnamesize, &status);
if (status == 0) {
- funcname = ret; // use possibly realloc()-ed string
+ funcname = ret; // use possibly realloc()-ed string
dprintf(fd, " %s : %s+%s\n",
- symbollist[i], funcname, begin_offset);
+ symbollist[i], funcname, begin_offset);
} else {
// demangling failed. Output function name as a C function with
// no arguments.
dprintf(fd, " %s : %s()+%s\n",
- symbollist[i], begin_name, begin_offset);
+ symbollist[i], begin_name, begin_offset);
}
} else {
// couldn't parse the line? print the whole line.
print_stacktrace(fileno(out), max_frames);
}
-#endif // __STACKTRACE_H__
+#endif // __STACKTRACE_H__
template<typename _Tp>
class ModelList : public std::list<_Tp, ModelAlloc<_Tp> >
{
- public:
+public:
typedef std::list< _Tp, ModelAlloc<_Tp> > list;
ModelList() :
template<typename _Tp>
class SnapList : public std::list<_Tp, SnapshotAlloc<_Tp> >
{
- public:
+public:
typedef std::list<_Tp, SnapshotAlloc<_Tp> > list;
SnapList() :
template<typename _Tp>
class ModelVector : public std::vector<_Tp, ModelAlloc<_Tp> >
{
- public:
+public:
typedef std::vector< _Tp, ModelAlloc<_Tp> > vector;
ModelVector() :
template<typename _Tp>
class SnapVector : public std::vector<_Tp, SnapshotAlloc<_Tp> >
{
- public:
+public:
typedef std::vector< _Tp, SnapshotAlloc<_Tp> > vector;
SnapVector() :
SNAPSHOTALLOC
};
-#endif /* __STL_MODEL_H__ */
+#endif /* __STL_MODEL_H__ */
atomic_int x[2], idx, y;
-int r1, r2, r3; /* "local" variables */
+int r1, r2, r3; /* "local" variables */
static void a(void *obj)
{
static void a(void *obj)
{
-
+
m->lock();
while(load_32(&shareddata)==0)
v->wait(*m);
atomic_int x, y;
atomic_intptr_t z, z2;
-int r1, r2, r3; /* "local" variables */
+int r1, r2, r3; /* "local" variables */
/**
- This example illustrates a self-satisfying cycle involving
- synchronization. A failed synchronization creates the store that
- causes the synchronization to fail.
+ This example illustrates a self-satisfying cycle involving
+ synchronization. A failed synchronization creates the store that
+ causes the synchronization to fail.
- The C++11 memory model nominally allows r1=0, r2=1, r3=5.
+ The C++11 memory model nominally allows r1=0, r2=1, r3=5.
- This example is insane, we don't support that behavior.
-*/
+ This example is insane, we don't support that behavior.
+ */
static void a(void *obj)
using namespace std;
atomic_int x, y;
-int r1, r2, r3, r4; /* "local" variables */
+int r1, r2, r3, r4; /* "local" variables */
static void a(void *obj)
{
using namespace std;
atomic_int x, y;
-int r1, r2, r3, r4; /* "local" variables */
+int r1, r2, r3, r4; /* "local" variables */
static void a(void *obj)
{
static void a(void *obj)
{
int i;
- for(i = 0; i < 2; i++) {
+ for(i = 0;i < 2;i++) {
if ((i % 2) == 0) {
read_lock(&mylock);
load_32(&shareddata);
static void a(void *obj)
{
int i;
- for(i = 0; i < 2; i++) {
+ for(i = 0;i < 2;i++) {
if ((i % 2) == 0) {
read_lock(&mylock);
load_32(&shareddata);
using namespace std;
atomic_int x, y;
-int r0, r1, r2, r3; /* "local" variables */
+int r0, r1, r2, r3; /* "local" variables */
static void a(void *obj)
{
atomic_int z;
static void a(void *obj)
{
- (void)atomic_load_explicit(&z, memory_order_relaxed); // this is only for schedule control
+ (void)atomic_load_explicit(&z, memory_order_relaxed); // this is only for schedule control
int t1=atomic_load_explicit(&x, memory_order_relaxed);
atomic_store_explicit(&y, 1, memory_order_relaxed);
printf("t1=%d\n",t1);
printf("expected: %d\n", expected);
/*
short v1 = atomic_exchange_explicit(&x, 8, memory_order_relaxed);
- short v2 = atomic_exchange_explicit(&x, -10, memory_order_relaxed);
- short v3 = atomic_load_explicit(&x, memory_order_relaxed);
- printf("v1 = %d, v2 = %d, v3 = %d\n", v1, v2, v3);
-*/
+ short v2 = atomic_exchange_explicit(&x, -10, memory_order_relaxed);
+ short v3 = atomic_load_explicit(&x, memory_order_relaxed);
+ printf("v1 = %d, v2 = %d, v3 = %d\n", v1, v2, v3);
+ */
}
static void b(void *obj)
static void a(void *obj)
{
int i;
- for (i = 0; i < N; i++)
+ for (i = 0;i < N;i++)
atomic_fetch_add_explicit(&x, 1, memory_order_relaxed);
}
printf("r2=%d\n",r2);
}
-int main(int argc, char **argv)
+int user_main(int argc, char **argv)
{
thrd_t t1, t2;
#include <threads.h>
#include <stdatomic.h>
#include "librace.h"
- atomic_int x1;
- atomic_int x2;
- atomic_int x3;
- atomic_int x4;
- atomic_int x5;
- atomic_int x6;
- atomic_int x7;
+atomic_int x1;
+atomic_int x2;
+atomic_int x3;
+atomic_int x4;
+atomic_int x5;
+atomic_int x6;
+atomic_int x7;
static void a(void *obj)
{
atomic_store_explicit(&x1, 1,memory_order_relaxed);
int user_main(int argc, char **argv)
{
thrd_t t1, t2, t3, t4, t5, t6, t7, t8;
- atomic_init(&x1, 0);
- atomic_init(&x2, 0);
- atomic_init(&x3, 0);
- atomic_init(&x4, 0);
- atomic_init(&x5, 0);
- atomic_init(&x6, 0);
- atomic_init(&x7, 0);
+ atomic_init(&x1, 0);
+ atomic_init(&x2, 0);
+ atomic_init(&x3, 0);
+ atomic_init(&x4, 0);
+ atomic_init(&x5, 0);
+ atomic_init(&x6, 0);
+ atomic_init(&x7, 0);
- thrd_create(&t1, (thrd_start_t)&a, NULL);
- thrd_create(&t2, (thrd_start_t)&b, NULL);
- thrd_create(&t3, (thrd_start_t)&c, NULL);
- thrd_create(&t4, (thrd_start_t)&d, NULL);
- thrd_create(&t5, (thrd_start_t)&e, NULL);
- thrd_create(&t6, (thrd_start_t)&f, NULL);
- thrd_create(&t7, (thrd_start_t)&g, NULL);
- thrd_create(&t8, (thrd_start_t)&h, NULL);
+ thrd_create(&t1, (thrd_start_t)&a, NULL);
+ thrd_create(&t2, (thrd_start_t)&b, NULL);
+ thrd_create(&t3, (thrd_start_t)&c, NULL);
+ thrd_create(&t4, (thrd_start_t)&d, NULL);
+ thrd_create(&t5, (thrd_start_t)&e, NULL);
+ thrd_create(&t6, (thrd_start_t)&f, NULL);
+ thrd_create(&t7, (thrd_start_t)&g, NULL);
+ thrd_create(&t8, (thrd_start_t)&h, NULL);
- thrd_join(t1);
- thrd_join(t2);
- thrd_join(t3);
- thrd_join(t4);
- thrd_join(t5);
- thrd_join(t6);
- thrd_join(t7);
- thrd_join(t8);
+ thrd_join(t1);
+ thrd_join(t2);
+ thrd_join(t3);
+ thrd_join(t4);
+ thrd_join(t5);
+ thrd_join(t6);
+ thrd_join(t7);
+ thrd_join(t8);
- return 0;
+ return 0;
}
#include <threads.h>
#include <stdatomic.h>
#include "librace.h"
- atomic_int x1;
- atomic_int x2;
- atomic_int x3;
- atomic_int x4;
- atomic_int x5;
- atomic_int x6;
- atomic_int x7;
+atomic_int x1;
+atomic_int x2;
+atomic_int x3;
+atomic_int x4;
+atomic_int x5;
+atomic_int x6;
+atomic_int x7;
static void a(void *obj)
{
atomic_store_explicit(&x1, 1,memory_order_seq_cst);
int user_main(int argc, char **argv)
{
thrd_t t1, t2, t3, t4, t5, t6, t7, t8;
- atomic_init(&x1, 0);
- atomic_init(&x2, 0);
- atomic_init(&x3, 0);
- atomic_init(&x4, 0);
- atomic_init(&x5, 0);
- atomic_init(&x6, 0);
- atomic_init(&x7, 0);
+ atomic_init(&x1, 0);
+ atomic_init(&x2, 0);
+ atomic_init(&x3, 0);
+ atomic_init(&x4, 0);
+ atomic_init(&x5, 0);
+ atomic_init(&x6, 0);
+ atomic_init(&x7, 0);
- thrd_create(&t1, (thrd_start_t)&a, NULL);
- thrd_create(&t2, (thrd_start_t)&b, NULL);
- thrd_create(&t3, (thrd_start_t)&c, NULL);
- thrd_create(&t4, (thrd_start_t)&d, NULL);
- thrd_create(&t5, (thrd_start_t)&e, NULL);
- thrd_create(&t6, (thrd_start_t)&f, NULL);
- thrd_create(&t7, (thrd_start_t)&g, NULL);
- thrd_create(&t8, (thrd_start_t)&h, NULL);
+ thrd_create(&t1, (thrd_start_t)&a, NULL);
+ thrd_create(&t2, (thrd_start_t)&b, NULL);
+ thrd_create(&t3, (thrd_start_t)&c, NULL);
+ thrd_create(&t4, (thrd_start_t)&d, NULL);
+ thrd_create(&t5, (thrd_start_t)&e, NULL);
+ thrd_create(&t6, (thrd_start_t)&f, NULL);
+ thrd_create(&t7, (thrd_start_t)&g, NULL);
+ thrd_create(&t8, (thrd_start_t)&h, NULL);
- thrd_join(t1);
- thrd_join(t2);
- thrd_join(t3);
- thrd_join(t4);
- thrd_join(t5);
- thrd_join(t6);
- thrd_join(t7);
- thrd_join(t8);
+ thrd_join(t1);
+ thrd_join(t2);
+ thrd_join(t3);
+ thrd_join(t4);
+ thrd_join(t5);
+ thrd_join(t6);
+ thrd_join(t7);
+ thrd_join(t8);
- return 0;
+ return 0;
}
#include <stdint.h>
#include "mymemory.h"
-#include <threads.h>
+#include "threads.h"
#include "modeltypes.h"
#include "stl-model.h"
#include "context.h"
+#include "classlist.h"
struct thread_params {
thrd_start_t func;
THREAD_COMPLETED
} thread_state;
-class ModelAction;
/** @brief A Thread is created for each user-space thread */
class Thread {
return id;
}
-#endif /* __THREADS_MODEL_H__ */
+#endif /* __THREADS_MODEL_H__ */
stack(NULL),
user_thread(NULL),
id(tid),
- state(THREAD_READY), /* Thread is always ready? */
+ state(THREAD_READY), /* Thread is always ready? */
last_action_val(0),
model_thread(true)
{
if (ret)
model_print("Error in create_context\n");
- user_thread->priv = this; // WL
+ user_thread->priv = this; // WL
}
/**
bool Thread::is_waiting_on(const Thread *t) const
{
Thread *wait;
- for (wait = waiting_on(); wait != NULL; wait = wait->waiting_on())
+ for (wait = waiting_on();wait != NULL;wait = wait->waiting_on())
if (wait == t)
return true;
return false;
class TraceAnalysis {
- public:
+public:
/** setExecution is called once after installation with a reference to
* the ModelExecution object. */
virtual void setExecution(ModelExecution * execution) = 0;
-
+
/** analyze is called once for each feasible trace with the complete
* action_list object. */
+++ /dev/null
-/**
- * @file workqueue.h
- * @brief Provides structures for queueing ModelChecker actions to be taken
- */
-
-#ifndef __WORKQUEUE_H__
-#define __WORKQUEUE_H__
-
-#include "mymemory.h"
-#include "stl-model.h"
-
-class ModelAction;
-
-typedef enum {
- WORK_NONE = 0, /**< No work to be done */
- WORK_CHECK_CURR_ACTION, /**< Check the current action; used for the
- first action of the work loop */
- WORK_CHECK_RELEASE_SEQ, /**< Check if any pending release sequences
- are resolved */
- WORK_CHECK_MO_EDGES, /**< Check if new mo_graph edges can be added */
-} model_work_t;
-
-/**
- */
-class WorkQueueEntry {
- public:
- /** @brief Type of work queue entry */
- model_work_t type;
-
- /**
- * @brief Object affected
- * @see CheckRelSeqWorkEntry
- */
- void *location;
-
- /**
- * @brief The ModelAction to work on
- * @see MOEdgeWorkEntry
- */
- ModelAction *action;
-};
-
-/**
- * @brief Work: perform initial promise, mo_graph checks on the current action
- *
- * This WorkQueueEntry performs the normal, first-pass checks for a ModelAction
- * that is currently being explored. The current ModelAction (@a action) is the
- * only relevant parameter to this entry.
- */
-class CheckCurrWorkEntry : public WorkQueueEntry {
- public:
- /**
- * @brief Constructor for a "check current action" work entry
- * @param curr The current action
- */
- CheckCurrWorkEntry(ModelAction *curr) {
- type = WORK_CHECK_CURR_ACTION;
- location = NULL;
- action = curr;
- }
-};
-
-/**
- * @brief Work: check an object location for resolved release sequences
- *
- * This WorkQueueEntry checks synchronization and the mo_graph for resolution
- * of any release sequences. The object @a location is the only relevant
- * parameter to this entry.
- */
-class CheckRelSeqWorkEntry : public WorkQueueEntry {
- public:
- /**
- * @brief Constructor for a "check release sequences" work entry
- * @param l The location which must be checked for release sequences
- */
- CheckRelSeqWorkEntry(void *l) {
- type = WORK_CHECK_RELEASE_SEQ;
- location = l;
- action = NULL;
- }
-};
-
-/**
- * @brief Work: check a ModelAction for new mo_graph edges
- *
- * This WorkQueueEntry checks for new mo_graph edges for a particular
- * ModelAction (e.g., that was just generated or that updated its
- * synchronization). The ModelAction @a action is the only relevant parameter
- * to this entry.
- */
-class MOEdgeWorkEntry : public WorkQueueEntry {
- public:
- /**
- * @brief Constructor for a mo_edge work entry
- * @param updated The ModelAction which was updated, triggering this work
- */
- MOEdgeWorkEntry(ModelAction *updated) {
- type = WORK_CHECK_MO_EDGES;
- location = NULL;
- action = updated;
- }
-};
-
-/** @brief typedef for the work queue type */
-typedef ModelList<WorkQueueEntry> work_queue_t;
-
-#endif /* __WORKQUEUE_H__ */