sp_before_semi = ignore
sp_before_semi_for_empty = ignore
sp_after_semi_for_empty = ignore
+sp_before_tr_emb_cmt = force
+sp_num_before_tr_emb_cmt = 2
+indent_relative_single_line_comments = true
\ No newline at end of file
ASSERT(is_read());
if (reads_from)
return reads_from->get_write_value();
- return VALUE_NONE; // Only for new actions with no reads-from
+ return VALUE_NONE; // Only for new actions with no reads-from
}
/**
reads_from = act;
- if (act->is_uninitialized()) { // WL
+ if (act->is_uninitialized()) { // WL
uint64_t val = *((uint64_t *) location);
ModelAction * act_initialized = (ModelAction *)act;
act_initialized->set_value(val);
/** @brief Represents an action type, identifying one of several types of
* ModelAction */
typedef enum action_type {
- THREAD_CREATE, // < A thread creation action
- THREAD_START, // < First action in each thread
- THREAD_YIELD, // < A thread yield action
- THREAD_JOIN, // < A thread join action
- THREAD_FINISH, // < A thread completion action
- PTHREAD_CREATE, // < A pthread creation action
- PTHREAD_JOIN, // < A pthread join action
- ATOMIC_UNINIT, // < Represents an uninitialized atomic
- ATOMIC_READ, // < An atomic read action
- ATOMIC_WRITE, // < An atomic write action
- ATOMIC_RMWR, // < The read part of an atomic RMW action
- ATOMIC_RMWRCAS, // < The read part of an atomic RMW action
- ATOMIC_RMW, // < The write part of an atomic RMW action
- ATOMIC_RMWC, // < Convert an atomic RMW action into a READ
- ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
- ATOMIC_FENCE, // < A fence action
- ATOMIC_LOCK, // < A lock action
- ATOMIC_TRYLOCK, // < A trylock action
- ATOMIC_UNLOCK, // < An unlock action
- ATOMIC_NOTIFY_ONE, // < A notify_one action
- ATOMIC_NOTIFY_ALL, // < A notify all action
- ATOMIC_WAIT, // < A wait action
- ATOMIC_ANNOTATION, // < An annotation action to pass information to a trace analysis
+ THREAD_CREATE, // < A thread creation action
+ THREAD_START, // < First action in each thread
+ THREAD_YIELD, // < A thread yield action
+ THREAD_JOIN, // < A thread join action
+ THREAD_FINISH, // < A thread completion action
+ PTHREAD_CREATE, // < A pthread creation action
+ PTHREAD_JOIN, // < A pthread join action
+ ATOMIC_UNINIT, // < Represents an uninitialized atomic
+ ATOMIC_READ, // < An atomic read action
+ ATOMIC_WRITE, // < An atomic write action
+ ATOMIC_RMWR, // < The read part of an atomic RMW action
+ ATOMIC_RMWRCAS, // < The read part of an atomic RMW action
+ ATOMIC_RMW, // < The write part of an atomic RMW action
+ ATOMIC_RMWC, // < Convert an atomic RMW action into a READ
+ ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
+ ATOMIC_FENCE, // < A fence action
+ ATOMIC_LOCK, // < A lock action
+ ATOMIC_TRYLOCK, // < A trylock action
+ ATOMIC_UNLOCK, // < An unlock action
+ ATOMIC_NOTIFY_ONE, // < A notify_one action
+ ATOMIC_NOTIFY_ALL, // < A notify all action
+ ATOMIC_WAIT, // < A wait action
+ ATOMIC_ANNOTATION, // < An annotation action to pass information to a trace analysis
NOOP
} action_type_t;
modelclock_t seq_number;
};
-#endif/* __ACTION_H__ */
+#endif /* __ACTION_H__ */
SNAPSHOTALLOC
};
-#endif/* __BUGMESSAGE_H__ */
+#endif /* __BUGMESSAGE_H__ */
int num_threads;
};
-#endif/* __CLOCKVECTOR_H__ */
+#endif /* __CLOCKVECTOR_H__ */
model_print("\t%s\n", strings[i]);
free(strings);
-#endif/* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACKTRACE */
}
void assert_hook(void)
model_print("---- END PROGRAM OUTPUT ----\n");
}
-#endif/* ! CONFIG_DEBUG */
+#endif /* ! CONFIG_DEBUG */
#else
#define ASSERT(expr) \
do { } while (0)
-#endif/* CONFIG_ASSERT */
+#endif /* CONFIG_ASSERT */
#define error_msg(...) fprintf(stderr, "Error: " __VA_ARGS__)
void print_trace(void);
-#endif/* __COMMON_H__ */
+#endif /* __COMMON_H__ */
#else
#define BIT48 0
#endif
-#endif/* BIT48 */
+#endif /* BIT48 */
/** Snapshotting configurables */
return 0;
}
-#endif/* MAC */
+#endif /* MAC */
return swapcontext(oucp, ucp);
}
-#endif/* !MAC */
+#endif /* !MAC */
-#endif/* __CONTEXT_H__ */
+#endif /* __CONTEXT_H__ */
if (!hasCycles)
hasCycles = checkReachable(tonode, fromnode);
} else
- return false; /* No new edge */
+ return false;/* No new edge */
/*
* If the fromnode has a rmwnode that is not the tonode, we should
CycleNode *hasRMW;
};
-#endif/* __CYCLEGRAPH_H__ */
+#endif /* __CYCLEGRAPH_H__ */
#define MAXREADVECTOR (READMASK-1)
#define MAXWRITEVECTOR (WRITEMASK-1)
-#endif/* __DATARACE_H__ */
+#endif /* __DATARACE_H__ */
params(NULL),
scheduler(scheduler),
action_trace(),
- thread_map(2), /* We'll always need at least 2 threads */
+ thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
pthread_counter(0),
obj_map(),
state->locked = NULL;
if (!curr->is_wait())
- break; /* The rest is only for ATOMIC_WAIT */
+ break;/* The rest is only for ATOMIC_WAIT */
break;
}
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
- break; // WL: to be add (modified)
+ updated = true; /* trigger rel-seq checks */
+ break; // WL: to be add (modified)
}
case THREAD_FINISH: {
scheduler->wake(waiting);
}
th->complete();
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_START: {
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
*curr = newcurr;
- return false; /* Action was explored previously */
+ return false; /* Action was explored previously */
} else {
newcurr = *curr;
/* Assign most recent release fence */
newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
- return true; /* This was a new ModelAction */
+ return true; /* This was a new ModelAction */
}
}
else if (rf->get_last_fence_release())
release_heads->push_back(rf->get_last_fence_release());
if (!rf->is_rmw())
- break; /* End of RMW chain */
+ break;/* End of RMW chain */
/** @todo Need to be smarter here... In the linux lock
* example, this will run to the beginning of the program for
/* acq_rel RMW is a sufficient stopping condition */
if (rf->is_acquire() && rf->is_release())
- return true; /* complete */
+ return true;/* complete */
};
- ASSERT(rf); // Needs to be real write
+ ASSERT(rf); // Needs to be real write
if (rf->is_release())
- return true; /* complete */
+ return true;/* complete */
/* else relaxed write
* - check for fence-release in the same thread (29.8, stmt. 3)
if (fence_release)
release_heads->push_back(fence_release);
- return true; /* complete */
+ return true; /* complete */
}
/**
action_list_t::reverse_iterator rit;
for (rit = list->rbegin();(*rit) != curr;rit++)
;
- rit++; /* Skip past curr */
+ rit++; /* Skip past curr */
for ( ;rit != list->rend();rit++)
if ((*rit)->is_write() && (*rit)->is_seqcst())
return *rit;
Thread *curr_thrd = get_thread(curr);
ASSERT(curr_thrd->get_state() == THREAD_READY);
- ASSERT(check_action_enabled(curr)); /* May have side effects? */
+ ASSERT(check_action_enabled(curr)); /* May have side effects? */
curr = check_current_action(curr);
ASSERT(curr);
Thread * action_select_next_thread(const ModelAction *curr) const;
};
-#endif/* __EXECUTION_H__ */
+#endif /* __EXECUTION_H__ */
_GLIBCXX_END_NAMESPACE_VERSION
}
-#endif// defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
-#endif// _GLIBCXX_HAS_GTHREADS
+#endif // defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
+#endif // _GLIBCXX_HAS_GTHREADS
capacitymask = initialcapacity - 1;
threshold = (unsigned int)(initialcapacity * loadfactor);
- size = 0; // Initial number of elements in the hash
+ size = 0; // Initial number of elements in the hash
}
/** @brief Hash table destructor */
exit(EXIT_FAILURE);
}
- table = newtable; // Update the global hashtable upon resize()
+ table = newtable; // Update the global hashtable upon resize()
capacity = newsize;
capacitymask = newsize - 1;
search->val = bin->val;
}
- _free(oldtable); // Free the memory of the old hash table
+ _free(oldtable); // Free the memory of the old hash table
}
private:
double loadfactor;
};
-#endif/* __HASHTABLE_H__ */
+#endif /* __HASHTABLE_H__ */
{"analysis", required_argument, NULL, 't'},
{"options", required_argument, NULL, 'o'},
{"maxexecutions", required_argument, NULL, 'x'},
- {0, 0, 0, 0} /* Terminator */
+ {0, 0, 0, 0} /* Terminator */
};
int opt, longindex;
bool error = false;
error = true;
}
break;
- default: /* '?' */
+ default: /* '?' */
error = true;
break;
}
for(int exec = 0;exec < params.maxexecutions;exec++) {
thrd_t user_thread;
- Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
+ Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
execution->add_thread(t);
//Need to seed random number generator, otherwise its state gets reset
do {
thread_id_t tid = int_to_id(i);
Thread *thr = get_thread(tid);
if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
- switch_from_master(thr); // L: context swapped, and action type of thr changed.
+ switch_from_master(thr); // L: context swapped, and action type of thr changed.
if (thr->is_waiting_on(thr))
assert_bug("Deadlock detected (thread %u)", i);
}
/** @brief Model checker execution stats */
struct execution_stats {
- int num_total; /**< @brief Total number of executions */
- int num_infeasible; /**< @brief Number of infeasible executions */
- int num_buggy_executions; /** @brief Number of buggy executions */
- int num_complete; /**< @brief Number of feasible, non-buggy, complete executions */
- int num_redundant; /**< @brief Number of redundant, aborted executions */
+ int num_total; /**< @brief Total number of executions */
+ int num_infeasible; /**< @brief Number of infeasible executions */
+	int num_buggy_executions;  /**< @brief Number of buggy executions */
+ int num_complete; /**< @brief Number of feasible, non-buggy, complete executions */
+ int num_redundant; /**< @brief Number of redundant, aborted executions */
};
/** @brief The central structure for model-checking */
extern ModelChecker *model;
-#endif/* __MODEL_H__ */
+#endif /* __MODEL_H__ */
free(ptr);
}
-#endif/* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
void operator delete[](void *p, size_t size) { \
model_free(p); \
} \
- void * operator new(size_t size, void *p) { /* placement new */ \
+ void * operator new(size_t size, void *p) { /* placement new */ \
return p; \
}
void operator delete[](void *p, size_t size) { \
snapshot_free(p); \
} \
- void * operator new(size_t size, void *p) { /* placement new */ \
+ void * operator new(size_t size, void *p) { /* placement new */ \
return p; \
}
}; /* end of extern "C" */
#endif
-#endif/* _MY_MEMORY_H */
+#endif /* _MY_MEMORY_H */
int head_idx;
};
-#endif/* __NODESTACK_H__ */
+#endif /* __NODESTACK_H__ */
void redirect_output();
void clear_program_output();
void print_program_output();
-#endif/* ! CONFIG_DEBUG */
+#endif /* ! CONFIG_DEBUG */
-#endif/* __OUTPUT_H__ */
+#endif /* __OUTPUT_H__ */
char **argv;
};
-#endif/* __PARAMS_H__ */
+#endif /* __PARAMS_H__ */
void pthread_exit(void *value_ptr) {
Thread * th = thread_current();
model->switch_to_master(new ModelAction(THREAD_FINISH, std::memory_order_seq_cst, th));
- while(1) ; //make warning goaway
+	while(1) ;//make warning go away
}
int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
}
if (avail_threads == 0)
- return NULL; // No threads availablex
+		return NULL;// No threads available
Thread * thread = execution->getFuzzer()->selectThread(n, thread_list, avail_threads);
curr_thread_index = id_to_int(thread->get_id());
Thread *current;
};
-#endif/* __SCHEDULE_H__ */
+#endif /* __SCHEDULE_H__ */
/* Struct for each memory region */
struct MemoryRegion {
- void *basePtr; // base of memory region
- int sizeInPages; // size of memory region in pages
+ void *basePtr; // base of memory region
+ int sizeInPages; // size of memory region in pages
};
/** ReturnPageAlignedAddress returns a page aligned address for the
mprot_snapshotter(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions);
~mprot_snapshotter();
- struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
- snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
- void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
- struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore
- struct SnapShotRecord *snapShots; //This pointer references the snapshot array
+ struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
+ snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
+ void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
+	struct BackingPageRecord *backingRecords;  //This pointer references an array of backingpagerecord's (same number of elements as backingstore)
+ struct SnapShotRecord *snapShots; //This pointer references the snapshot array
- unsigned int lastSnapShot; //Stores the next snapshot record we should use
- unsigned int lastBackingPage; //Stores the next backingpage we should use
- unsigned int lastRegion; //Stores the next memory region to be used
+ unsigned int lastSnapShot; //Stores the next snapshot record we should use
+ unsigned int lastBackingPage; //Stores the next backingpage we should use
+ unsigned int lastRegion; //Stores the next memory region to be used
- unsigned int maxRegions; //Stores the max number of memory regions we support
- unsigned int maxBackingPages; //Stores the total number of backing pages
- unsigned int maxSnapShots; //Stores the total number of snapshots we allow
+ unsigned int maxRegions; //Stores the max number of memory regions we support
+ unsigned int maxBackingPages; //Stores the total number of backing pages
+ unsigned int maxSnapShots; //Stores the total number of snapshots we allow
MEMALLOC
};
}
void* addr = ReturnPageAlignedAddress(si->si_addr);
- unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
+ unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
if (backingpage == mprot_snap->maxBackingPages) {
model_print("Out of backing pages at %p\n", si->si_addr);
exit(EXIT_FAILURE);
memset(&si, 0, sizeof(si));
si.si_addr = ss.ss_sp;
mprot_handle_pf(SIGSEGV, &si, NULL);
- mprot_snap->lastBackingPage--; //remove the fake page we copied
+ mprot_snap->lastBackingPage--; //remove the fake page we copied
void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
void *pagealignedbase = PageAlignAddressUpward(basemySpace);
}
mprot_snap->lastSnapShot = theID;
mprot_snap->lastBackingPage = mprot_snap->snapShots[theID].firstBackingPage;
- mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
+ mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
}
#else /* !USE_MPROTECT_SNAPSHOT */
-#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20))// 100mb for the shared memory
+#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100mb for the shared memory
#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
struct fork_snapshotter {
fork_snap->mIDToRollback = -1;
}
-#endif/* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
/**
* @brief Initializes the snapshot system
char* ret = abi::__cxa_demangle(begin_name,
funcname, &funcnamesize, &status);
if (status == 0) {
- funcname = ret; // use possibly realloc()-ed string
+ funcname = ret; // use possibly realloc()-ed string
dprintf(fd, " %s : %s+%s\n",
symbollist[i], funcname, begin_offset);
} else {
print_stacktrace(fileno(out), max_frames);
}
-#endif// __STACKTRACE_H__
+#endif // __STACKTRACE_H__
SNAPSHOTALLOC
};
-#endif/* __STL_MODEL_H__ */
+#endif /* __STL_MODEL_H__ */
return id;
}
-#endif/* __THREADS_MODEL_H__ */
+#endif /* __THREADS_MODEL_H__ */
stack(NULL),
user_thread(NULL),
id(tid),
- state(THREAD_READY), /* Thread is always ready? */
+ state(THREAD_READY), /* Thread is always ready? */
last_action_val(0),
model_thread(true)
{
if (ret)
model_print("Error in create_context\n");
- user_thread->priv = this; // WL
+ user_thread->priv = this; // WL
}
/**