--- /dev/null
+****************************************
+CDSChecker Readme
+****************************************
+
+This is an evaluation-only version of CDSChecker. Please do not distribute.
+
+CDSChecker compiles as a dynamically-linked shared library by simply running
+'make'. It should compile on Linux and Mac OSX, and has been tested with LLVM
+(clang/clang++) and GCC.
+
+Test programs should use the standard C11/C++11 library headers
+(<atomic>/<stdatomic.h>, <mutex>, <condition_variable>, <threads.h>) and must
+name their main routine as user_main(int, char**) rather than main(int, char**).
+We only support C11 thread syntax (thrd_t, etc. from <threads.h>).
+
+Test programs may also use our included happens-before race detector by
+including <librace.h> and utilizing the appropriate functions
+(store_{8,16,32,64}() and load_{8,16,32,64}()) for loading/storing data from/to
+non-atomic shared memory.
+
+Test programs should be compiled against our shared library (libmodel.so) using
+the headers in the include/ directory. Then the shared library must be made
+available to the dynamic linker, using the LD_LIBRARY_PATH environment
+variable, for instance.
+
+Sample run instructions:
+
+$ make
+$ export LD_LIBRARY_PATH=.
+$ ./test/userprog.o # Runs simple test program
+$ ./test/userprog.o -h # Prints help information
+Usage: <program name> [MC_OPTIONS] -- [PROGRAM ARGUMENTS]
+
+Options:
+-h Display this help message and exit
+-m Maximum times a thread can read from the same write
+ while other writes exist. Default: 0
+-M Maximum number of future values that can be sent to
+ the same read. Default: 0
+-s Maximum actions that the model checker will wait for
+ a write from the future past the expected number of
+ actions. Default: 100
+-S Future value expiration sloppiness. Default: 10
+-f Specify a fairness window in which actions that are
+ enabled sufficiently many times should receive
+ priority for execution. Default: 0
+-e Enabled count. Default: 1
+-b Upper length bound. Default: 0
+-- Program arguments follow.
+
+
+Note that we also provide a series of benchmarks (distributed separately),
+which can be placed under the benchmarks/ directory. After building CDSChecker,
+you can build and run the benchmarks as follows:
+
+ cd benchmarks
+ make
+ ./run.sh barrier/barrier -f 10 -m 2 # runs barrier test with fairness/memory liveness
+ ./bench.sh <dir> # run all benchmarks twice, with timing results; all logged to <dir>
#include <stdio.h>
#include <stdlib.h>
+#include <model-assert.h>
+
#include "common.h"
#include "model.h"
#include "stacktrace.h"
{
printf("Add breakpoint to line %u in file %s.\n",__LINE__,__FILE__);
}
+
+/**
+ * Implementation behind the MODEL_ASSERT() macro: report a user-program
+ * assertion failure to the model checker (rather than aborting the whole
+ * process) and yield control back to the master/system context.
+ * @param expr The asserted condition; failure is reported only when false
+ * @param file Source file name of the assertion (typically __FILE__)
+ * @param line Source line number of the assertion (typically __LINE__)
+ */
+void model_assert(bool expr, const char *file, int line)
+{
+	if (!expr) {
+		printf(" [BUG] Program has hit assertion in file %s at line %d\n",
+				file, line);
+		model->set_assert();
+		model->switch_to_master(NULL);
+	}
+}
void assert_hook(void);
+#ifdef CONFIG_ASSERT
#define ASSERT(expr) \
do { \
if (!(expr)) { \
exit(EXIT_FAILURE); \
} \
} while (0)
+#else
+#define ASSERT(expr) \
+ do { } while (0)
+#endif /* CONFIG_ASSERT */
#define error_msg(...) fprintf(stderr, "Error: " __VA_ARGS__)
/* #ifndef CONFIG_DEBUG
#define CONFIG_DEBUG
#endif
+
+ #ifndef CONFIG_ASSERT
+ #define CONFIG_ASSERT
+ #endif
*/
/** Turn on support for dumping cyclegraphs as dot files at each
/* Size of stack to allocate for a thread. */
#define STACK_SIZE (1024 * 1024)
+/** How many shadow tables of memory to preallocate for data race detector. */
+#define SHADOWBASETABLES 4
+
+/** Enable debugging assertions (via ASSERT()) */
+#define CONFIG_ASSERT
#endif
#include <cstring>
#include "mymemory.h"
#include "clockvector.h"
+#include "config.h"
struct ShadowTable *root;
std::vector<struct DataRace *> unrealizedraces;
+void *memory_base;
+void *memory_top;
+
/** This function initializes the data race detector. */
void initRaceDetector() {
root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
+ memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable)*SHADOWBASETABLES, 1);
+ memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable)*SHADOWBASETABLES;
+}
+
+/** Bump-pointer allocation of shadow-table memory out of the preallocated
+ *  region set up by initRaceDetector(); falls back to snapshot_calloc() once
+ *  the region is exhausted. The preallocated region was obtained via
+ *  snapshot_calloc(), so callers receive zero-initialized memory either way. */
+void * table_calloc(size_t size) {
+	if ((((char *)memory_base)+size)>memory_top) {
+		return snapshot_calloc(size, 1);
+	} else {
+		void *tmp=memory_base;
+		memory_base=((char *)memory_base)+size;
+		return tmp;
+	}
}
/** This function looks up the entry in the shadow table corresponding to a
* given address.*/
-static uint64_t * lookupAddressEntry(void * address) {
+static uint64_t * lookupAddressEntry(const void * address) {
struct ShadowTable *currtable=root;
#if BIT48
currtable=(struct ShadowTable *) currtable->array[(((uintptr_t)address)>>32)&MASK16BIT];
if (currtable==NULL) {
- currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address)>>32)&MASK16BIT] = snapshot_calloc(sizeof(struct ShadowTable), 1));
+ currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address)>>32)&MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
}
#endif
struct ShadowBaseTable * basetable=(struct ShadowBaseTable *) currtable->array[(((uintptr_t)address)>>16)&MASK16BIT];
if (basetable==NULL) {
- basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address)>>16)&MASK16BIT] = snapshot_calloc(sizeof(struct ShadowBaseTable), 1));
+ basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address)>>16)&MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
}
return &basetable->array[((uintptr_t)address)&MASK16BIT];
}
}
/** This function is called when we detect a data race.*/
-static void reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, void *address) {
+static void reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address) {
struct DataRace *race = (struct DataRace *)snapshot_malloc(sizeof(struct DataRace));
race->oldthread=oldthread;
race->oldclock=oldclock;
}
/** This function does race detection on a read for an expanded record. */
-void fullRaceCheckRead(thread_id_t thread, void *location, uint64_t * shadow, ClockVector *currClock) {
+void fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t * shadow, ClockVector *currClock) {
struct RaceRecord * record=(struct RaceRecord *) (*shadow);
/* Check for datarace against last write. */
}
/** This function does race detection on a read. */
-void raceCheckRead(thread_id_t thread, void *location, ClockVector *currClock) {
+void raceCheckRead(thread_id_t thread, const void *location, ClockVector *currClock) {
uint64_t * shadow=lookupAddressEntry(location);
uint64_t shadowval=*shadow;
bool isnewwrite;
/* Address of data race. */
- void *address;
+ const void *address;
};
#define MASK16BIT 0xffff
void initRaceDetector();
void raceCheckWrite(thread_id_t thread, void *location, ClockVector *currClock);
-void raceCheckRead(thread_id_t thread, void *location, ClockVector *currClock);
+void raceCheckRead(thread_id_t thread, const void *location, ClockVector *currClock);
bool checkDataRaces();
void printRace(struct DataRace *race);
void store_32(void *addr, uint32_t val);
void store_64(void *addr, uint64_t val);
- uint8_t load_8(void *addr);
- uint16_t load_16(void *addr);
- uint32_t load_32(void *addr);
- uint64_t load_64(void *addr);
+ uint8_t load_8(const void *addr);
+ uint16_t load_16(const void *addr);
+ uint32_t load_32(const void *addr);
+ uint64_t load_64(const void *addr);
#ifdef __cplusplus
}
--- /dev/null
+#ifndef __MODEL_ASSERT_H__
+#define __MODEL_ASSERT_H__
+
+#if __cplusplus
+extern "C" {
+#endif
+
+void model_assert(bool expr, const char *file, int line);
+#define MODEL_ASSERT(expr) model_assert((expr), __FILE__, __LINE__)
+
+#if __cplusplus
+}
+#endif
+
+#endif /* __MODEL_ASSERT_H__ */
(*(uint64_t *)addr) = val;
}
-uint8_t load_8(void *addr)
+uint8_t load_8(const void *addr)
{
DEBUG("addr = %p\n", addr);
thread_id_t tid=thread_current()->get_id();
return *((uint8_t *)addr);
}
-uint16_t load_16(void *addr)
+uint16_t load_16(const void *addr)
{
DEBUG("addr = %p\n", addr);
thread_id_t tid=thread_current()->get_id();
ClockVector * cv=model->get_cv(tid);
raceCheckRead(tid, addr, cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+1), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+1), cv);
return *((uint16_t *)addr);
}
-uint32_t load_32(void *addr)
+uint32_t load_32(const void *addr)
{
DEBUG("addr = %p\n", addr);
thread_id_t tid=thread_current()->get_id();
ClockVector * cv=model->get_cv(tid);
raceCheckRead(tid, addr, cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+1), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+2), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+3), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+1), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+2), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+3), cv);
return *((uint32_t *)addr);
}
-uint64_t load_64(void *addr)
+uint64_t load_64(const void *addr)
{
DEBUG("addr = %p\n", addr);
thread_id_t tid=thread_current()->get_id();
ClockVector * cv=model->get_cv(tid);
raceCheckRead(tid, addr, cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+1), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+2), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+3), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+4), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+5), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+6), cv);
- raceCheckRead(tid, (void *)(((uintptr_t)addr)+7), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+1), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+2), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+3), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+4), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+5), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+6), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr)+7), cv);
return *((uint64_t *)addr);
}
params->fairwindow = 0;
params->enabledcount = 1;
params->bound = 0;
+ params->maxfuturevalues = 0;
+ params->expireslop = 10;
}
static void print_usage(struct model_params *params) {
"-h Display this help message and exit\n"
"-m Maximum times a thread can read from the same write\n"
" while other writes exist. Default: %d\n"
+"-M Maximum number of future values that can be sent to\n"
+" the same read. Default: %d\n"
"-s Maximum actions that the model checker will wait for\n"
" a write from the future past the expected number of\n"
" actions. Default: %d\n"
+"-S Future value expiration sloppiness. Default: %u\n"
"-f Specify a fairness window in which actions that are\n"
" enabled sufficiently many times should receive\n"
" priority for execution. Default: %d\n"
"-e Enabled count. Default: %d\n"
"-b Upper length bound. Default: %d\n"
"-- Program arguments follow.\n\n",
-params->maxreads, params->maxfuturedelay, params->fairwindow, params->enabledcount, params->bound);
+params->maxreads, params->maxfuturevalues, params->maxfuturedelay, params->expireslop, params->fairwindow, params->enabledcount, params->bound);
exit(EXIT_SUCCESS);
}
static void parse_options(struct model_params *params, int *argc, char ***argv) {
- const char *shortopts = "hm:s:f:e:b:";
+ const char *shortopts = "hm:M:s:S:f:e:b:";
int opt;
bool error = false;
while (!error && (opt = getopt(*argc, *argv, shortopts)) != -1) {
case 's':
params->maxfuturedelay = atoi(optarg);
break;
+ case 'S':
+ params->expireslop = atoi(optarg);
+ break;
case 'f':
params->fairwindow = atoi(optarg);
break;
case 'm':
params->maxreads = atoi(optarg);
break;
+ case 'M':
+ params->maxfuturevalues = atoi(optarg);
+ break;
default: /* '?' */
error = true;
break;
}
}
- (*argc) -= optind;
- (*argv) += optind;
+ (*argv)[optind - 1] = (*argv)[0];
+ (*argc) -= (optind - 1);
+ (*argv) += (optind - 1);
+ optind = 1;
if (error)
print_usage(params);
ModelChecker *model;
+/**
+ * Structure for holding small ModelChecker members that should be snapshotted
+ */
+struct model_snapshot_members {
+ ModelAction *current_action;
+ unsigned int next_thread_id;
+ modelclock_t used_sequence_numbers;
+ Thread *nextThread;
+ ModelAction *next_backtrack;
+};
+
/** @brief Constructor */
ModelChecker::ModelChecker(struct model_params params) :
/* Initialize default scheduler */
}
/** @return the number of user threads created during this execution */
-unsigned int ModelChecker::get_num_threads()
+unsigned int ModelChecker::get_num_threads() const
{
return priv->next_thread_id;
}
}
}
+/**
+ * Check if we are in a deadlock. Should only be called at the end of an
+ * execution, although it should not give false positives in the middle of an
+ * execution (there should be some ENABLED thread).
+ *
+ * @return True if program is in a deadlock; false otherwise
+ */
+bool ModelChecker::is_deadlocked() const
+{
+	bool blocking_threads = false;
+	for (unsigned int i = 0; i < get_num_threads(); i++) {
+		thread_id_t tid = int_to_id(i);
+		/* Any enabled thread can still make progress: not a deadlock */
+		if (is_enabled(tid))
+			return false;
+		/* A deadlock additionally requires some (non-model) user thread
+		 * to be blocked on a pending action; otherwise the program has
+		 * simply finished */
+		Thread *t = get_thread(tid);
+		if (!t->is_model_thread() && t->get_pending())
+			blocking_threads = true;
+	}
+	return blocking_threads;
+}
+
/**
* Queries the model-checker for more executions to explore and, if one
* exists, resets the model-checker state to execute a new execution.
num_executions++;
+ if (is_deadlocked())
+ printf("ERROR: DEADLOCK\n");
if (isfinalfeasible()) {
printf("Earliest divergence point since last feasible execution:\n");
if (earliest_diverge)
pending_rel_seqs->size());
- if (isfinalfeasible() || (params.bound != 0 && priv->used_sequence_numbers > params.bound ) || DBG_ENABLED() ) {
+ if (isfinalfeasible() || DBG_ENABLED()) {
checkDataRaces();
print_summary();
}
return true;
}
+/**
+ * Stores the ModelAction for the current thread action. Call this
+ * immediately before switching from user- to system-context to pass
+ * data between them.
+ * @param act The ModelAction created by the user-thread action
+ */
+void ModelChecker::set_current_action(ModelAction *act) {
+ priv->current_action = act;
+}
+
/**
* This is the heart of the model checker routine. It performs model-checking
* actions corresponding to a given "current action." Among other processes, it
ModelAction *last = get_last_action(int_to_id(i));
Thread *th = get_thread(int_to_id(i));
if ((last && rf->happens_before(last)) ||
- !scheduler->is_enabled(th) ||
+ !is_enabled(th) ||
th->is_complete())
future_ordered = true;
/* Don't consider more than one seq_cst write if we are a seq_cst read. */
if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
- DEBUG("Adding action to may_read_from:\n");
- if (DBG_ENABLED()) {
- act->print();
- curr->print();
- }
-
- if (curr->get_sleep_flag() && ! curr->is_seqcst()) {
- if (sleep_can_read_from(curr, act))
- curr->get_node()->add_read_from(act);
- } else
+ if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
+ DEBUG("Adding action to may_read_from:\n");
+ if (DBG_ENABLED()) {
+ act->print();
+ curr->print();
+ }
curr->get_node()->add_read_from(act);
+ }
}
/* Include at most one act per-thread that "happens before" curr */
return get_thread(act->get_tid());
}
+/**
+ * @brief Check if a Thread is currently enabled
+ * @param t The Thread to check
+ * @return True if the Thread is currently enabled
+ */
+bool ModelChecker::is_enabled(Thread *t) const
+{
+ return scheduler->is_enabled(t);
+}
+
+/**
+ * @brief Check if a Thread is currently enabled
+ * @param tid The ID of the Thread to check
+ * @return True if the Thread is currently enabled
+ */
+bool ModelChecker::is_enabled(thread_id_t tid) const
+{
+ return scheduler->is_enabled(tid);
+}
+
/**
* Switch from a user-context to the "master thread" context (a.k.a. system
* context). This switch is made with the intention of exploring a particular
class Promise;
class Scheduler;
class Thread;
+struct model_snapshot_members;
/** @brief Shorthand for a list of release sequence heads */
typedef std::vector< const ModelAction *, ModelAlloc<const ModelAction *> > rel_heads_list_t;
unsigned int fairwindow;
unsigned int enabledcount;
unsigned int bound;
+
+ /** @brief Maximum number of future values that can be sent to the same
+ * read */
+ int maxfuturevalues;
+
+ /** @brief Only generate a new future value/expiration pair if the
+ * expiration time exceeds the existing one by more than the slop
+ * value */
+ unsigned int expireslop;
};
struct PendingFutureValue {
ModelAction *writer;
- ModelAction * act;
-};
-
-/**
- * Structure for holding small ModelChecker members that should be snapshotted
- */
-struct model_snapshot_members {
- ModelAction *current_action;
- unsigned int next_thread_id;
- modelclock_t used_sequence_numbers;
- Thread *nextThread;
- ModelAction *next_backtrack;
+ ModelAction *act;
};
/** @brief Records information regarding a single pending release sequence */
Thread * get_thread(thread_id_t tid) const;
Thread * get_thread(ModelAction *act) const;
+ bool is_enabled(Thread *t) const;
+ bool is_enabled(thread_id_t tid) const;
+
thread_id_t get_next_id();
- unsigned int get_num_threads();
+ unsigned int get_num_threads() const;
Thread * get_current_thread();
int switch_to_master(ModelAction *act);
void finish_execution();
bool isfeasibleprefix();
void set_assert() {asserted=true;}
+ bool is_deadlocked() const;
/** @brief Alert the model-checker that an incorrectly-ordered
* synchronization was made */
void set_bad_synchronization() { bad_synchronization = true; }
const model_params params;
- Scheduler * get_scheduler() { return scheduler;}
Node * get_curr_node();
MEMALLOC
void wake_up_sleeping_actions(ModelAction * curr);
modelclock_t get_next_seq_num();
- /**
- * Stores the ModelAction for the current thread action. Call this
- * immediately before switching from user- to system-context to pass
- * data between them.
- * @param act The ModelAction created by the user-thread action
- */
- void set_current_action(ModelAction *act) { priv->current_action = act; }
+ void set_current_action(ModelAction *act);
Thread * check_current_action(ModelAction *curr);
bool initialize_curr_action(ModelAction **curr);
bool process_read(ModelAction *curr, bool second_part_of_rmw);
/**
- * Adds a value from a weakly ordered future write to backtrack to.
+ * Adds a value from a weakly ordered future write to backtrack to. This
+ * operation may "fail" if the future value has already been run (within some
+ * sloppiness window of this expiration), or if the futurevalues set has
+ * reached its maximum.
+ * @see model_params.maxfuturevalues
+ *
* @param value is the value to backtrack to.
+ * @return True if the future value was successfully added; false otherwise
*/
bool Node::add_future_value(uint64_t value, modelclock_t expiration) {
- int suitableindex=-1;
+ int idx = -1; /* Highest index where value is found */
for (unsigned int i = 0; i < future_values.size(); i++) {
if (future_values[i].value == value) {
- if (future_values[i].expiration>=expiration)
+ if (expiration <= future_values[i].expiration)
return false;
- if (future_index < ((int) i)) {
- suitableindex=i;
- }
+ idx = i;
}
}
-
- if (suitableindex!=-1) {
- future_values[suitableindex].expiration=expiration;
+ if (idx > future_index) {
+ /* Future value hasn't been explored; update expiration */
+ future_values[idx].expiration = expiration;
return true;
+ } else if (idx >= 0 && expiration <= future_values[idx].expiration + model->params.expireslop) {
+ /* Future value has been explored and is within the "sloppy" window */
+ return false;
}
- struct future_value newfv={value, expiration};
+
+ /* Limit the size of the future-values set */
+ if (model->params.maxfuturevalues > 0 &&
+ (int)future_values.size() >= model->params.maxfuturevalues)
+ return false;
+
+ struct future_value newfv = {value, expiration};
future_values.push_back(newfv);
return true;
}
return false;
synced_thread[id]=true;
- enabled_type_t * enabled=model->get_scheduler()->get_enabled();
unsigned int sync_size=synced_thread.size();
int promise_tid=id_to_int(read->get_tid());
for(unsigned int i=1;i<model->get_num_threads();i++) {
- if ((i >= sync_size || !synced_thread[i]) && ( (int)i != promise_tid ) && (enabled[i] != THREAD_DISABLED)) {
+ if ((i >= sync_size || !synced_thread[i]) && ( (int)i != promise_tid ) && model->is_enabled(int_to_id(i))) {
return false;
}
}
}
bool Promise::check_promise() {
- enabled_type_t * enabled=model->get_scheduler()->get_enabled();
unsigned int sync_size=synced_thread.size();
for(unsigned int i=1;i<model->get_num_threads();i++) {
- if ((i >= sync_size || !synced_thread[i]) && (enabled[i] != THREAD_DISABLED)) {
+ if ((i >= sync_size || !synced_thread[i]) && model->is_enabled(int_to_id(i))) {
return false;
}
}
/**
* @brief Check if a Thread is currently enabled
+ *
+ * Check if a Thread is currently enabled. "Enabled" includes both
+ * THREAD_ENABLED and THREAD_SLEEP_SET.
* @param t The Thread to check
* @return True if the Thread is currently enabled
*/
bool Scheduler::is_enabled(Thread *t) const
{
- int id = id_to_int(t->get_id());
- return (id >= enabled_len) ? false : (enabled[id] != THREAD_DISABLED);
+ return is_enabled(t->get_id());
+}
+
+/**
+ * @brief Check if a Thread is currently enabled
+ *
+ * Check if a Thread is currently enabled. "Enabled" includes both
+ * THREAD_ENABLED and THREAD_SLEEP_SET.
+ * @param tid The ID of the Thread to check
+ * @return True if the Thread is currently enabled
+ */
+bool Scheduler::is_enabled(thread_id_t tid) const
+{
+ int i = id_to_int(tid);
+ return (i >= enabled_len) ? false : (enabled[i] != THREAD_DISABLED);
}
enabled_type_t Scheduler::get_enabled(Thread *t) {
enabled_type_t get_enabled(Thread *t);
void update_sleep_set(Node *n);
bool is_enabled(Thread *t) const;
+ bool is_enabled(thread_id_t tid) const;
SNAPSHOTALLOC
private:
#include <iostream>
#include <fstream>
#include <unistd.h>
-#include <sys/types.h>
#include <sstream>
#include <cstring>
#include <string>
#include <inttypes.h>
#include "common.h"
-
+/* MYBINARYNAME only works because our pathname usually includes 'model' (e.g.,
+ * /.../model-checker/test/userprog.o) */
#define MYBINARYNAME "model"
#define MYLIBRARYNAME "libmodel.so"
-#define MAPFILE_FORMAT "/proc/%d/maps"
+#define MAPFILE "/proc/self/maps"
SnapshotStack * snapshotObject;
pclose(map);
}
#else
+
+/**
+ * Read the path of the current executable from /proc/self/exe.
+ * NOTE: readlink() does not NUL-terminate its buffer (POSIX), and the
+ * result is later used as a C string (strstr), so we must reserve a byte
+ * and terminate it ourselves.
+ * @param buf Destination buffer; NUL-terminated on success
+ * @param len Size of buf in bytes
+ */
+static void get_binary_name(char *buf, size_t len)
+{
+	ssize_t n = readlink("/proc/self/exe", buf, len - 1);
+	if (n == -1) {
+		perror("readlink");
+		exit(EXIT_FAILURE);
+	}
+	buf[n] = '\0';
+}
+
/** The SnapshotGlobalSegments function computes the memory regions
* that may contain globals and then configures the snapshotting
* library to snapshot them.
*/
static void SnapshotGlobalSegments(){
- int pid = getpid();
- char buf[9000], filename[100];
+ char buf[9000];
+ char binary_name[800];
FILE *map;
- sprintf(filename, MAPFILE_FORMAT, pid);
- map = fopen(filename, "r");
+ map = fopen(MAPFILE, "r");
if (!map) {
perror("fopen");
exit(EXIT_FAILURE);
}
+ get_binary_name(binary_name, sizeof(binary_name));
while (fgets(buf, sizeof(buf), map)) {
char regionname[200] = "";
char r, w, x, p;
void *begin, *end;
sscanf(buf, "%p-%p %c%c%c%c %*x %*x:%*x %*u %200s\n", &begin, &end, &r, &w, &x, &p, regionname);
- if (w == 'w' && (strstr(regionname, MYBINARYNAME) || strstr(regionname, MYLIBRARYNAME))) {
+ if (w == 'w' && (strstr(regionname, binary_name) || strstr(regionname, MYLIBRARYNAME))) {
size_t len = ((uintptr_t)end - (uintptr_t)begin) / PAGESIZE;
if (len != 0)
addMemoryRegionToSnapShot(begin, len);
#include <inttypes.h>
#include <fcntl.h>
#include <sys/mman.h>
-#include <sys/types.h>
#include <csignal>
#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100mb for the shared memory
#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
--- /dev/null
+/*
+ * Deadlock test case: thread a() acquires the two mutexes in the order
+ * x, y while thread b() acquires them in the order y, x, so some
+ * interleavings deadlock. The shared variable is accessed through the
+ * librace.h instrumented accessors (load_32/store_32) so the race
+ * detector can check the non-atomic accesses.
+ */
+#include <stdio.h>
+#include <threads.h>
+#include <mutex>
+
+#include "librace.h"
+
+std::mutex *x;
+std::mutex *y;
+uint32_t shared = 0;
+
+/* Reader thread: lock order x, then y */
+static void a(void *obj)
+{
+	x->lock();
+	y->lock();
+	printf("shared = %u\n", load_32(&shared));
+	y->unlock();
+	x->unlock();
+}
+
+/* Writer thread: lock order y, then x (opposite of a(), creating the
+ * potential deadlock) */
+static void b(void *obj)
+{
+	y->lock();
+	x->lock();
+	store_32(&shared, 16);
+	printf("write shared = 16\n");
+	x->unlock();
+	y->unlock();
+}
+
+/* Test entry point (CDSChecker programs name this user_main, not main) */
+int user_main(int argc, char **argv)
+{
+	thrd_t t1, t2;
+
+	x = new std::mutex();
+	y = new std::mutex();
+
+	printf("Thread %d: creating 2 threads\n", thrd_current());
+	thrd_create(&t1, (thrd_start_t)&a, NULL);
+	thrd_create(&t2, (thrd_start_t)&b, NULL);
+
+	thrd_join(t1);
+	thrd_join(t2);
+	printf("Thread %d is finished\n", thrd_current());
+
+	return 0;
+}