#include <algorithm>
#include <new>
#include <stdarg.h>
+#include <errno.h>
#include "model.h"
#include "execution.h"
#include "datarace.h"
#include "threads-model.h"
#include "bugmessage.h"
-#include "history.h"
#include "fuzzer.h"
-#include "newfuzzer.h"
-#define INITIAL_THREAD_ID 0
+#ifdef COLLECT_STAT
+static unsigned int atomic_load_count = 0;
+static unsigned int atomic_store_count = 0;
+static unsigned int atomic_rmw_count = 0;
+
+static unsigned int atomic_fence_count = 0;
+static unsigned int atomic_lock_count = 0;
+static unsigned int atomic_trylock_count = 0;
+static unsigned int atomic_unlock_count = 0;
+static unsigned int atomic_notify_count = 0;
+static unsigned int atomic_wait_count = 0;
+static unsigned int atomic_timedwait_count = 0;
+#endif
/**
* Structure for holding small ModelChecker members that should be snapshotted
scheduler(scheduler),
thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
- pthread_counter(1),
+ pthread_counter(2),
action_trace(),
obj_map(),
condvar_waiters_map(),
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
mo_graph(new CycleGraph()),
-#ifdef NEWFUZZER
- fuzzer(new NewFuzzer()),
-#else
fuzzer(new Fuzzer()),
-#endif
isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
/** @brief Destructor */
ModelExecution::~ModelExecution()
{
- for (unsigned int i = 0;i < get_num_threads();i++)
+ for (unsigned int i = INITIAL_THREAD_ID;i < get_num_threads();i++)
delete get_thread(int_to_id(i));
delete mo_graph;
return model->get_execution_number();
}
-static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
+/**
+ * @brief Get the per-location vector of per-thread action lists for @a ptr,
+ * lazily allocating an empty vector and registering it in @a hash on first use.
+ */
+static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
{
-	action_list_t *tmp = hash->get(ptr);
+	SnapVector<action_list_t> *tmp = hash->get(ptr);
	if (tmp == NULL) {
-		tmp = new action_list_t();
+		tmp = new SnapVector<action_list_t>();
		hash->put(ptr, tmp);
	}
	return tmp;
}
-static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
+/**
+ * @brief Get the simple action list associated with @a ptr, lazily
+ * allocating an empty list and registering it in @a hash on first use.
+ */
+static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
{
-	SnapVector<action_list_t> *tmp = hash->get(ptr);
+	simple_action_list_t *tmp = hash->get(ptr);
	if (tmp == NULL) {
-		tmp = new SnapVector<action_list_t>();
+		tmp = new simple_action_list_t();
		hash->put(ptr, tmp);
	}
	return tmp;
}
+/**
+ * @brief Overload of get_safe_ptr_vect_action for simple action lists:
+ * lazily allocates the per-location vector and registers it in @a hash.
+ */
+static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
+{
+	SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
+	if (tmp == NULL) {
+		tmp = new SnapVector<simple_action_list_t>();
+		hash->put(ptr, tmp);
+	}
+	return tmp;
+}
+
+/**
+ * When vectors of action lists are reallocated due to resize, the root address of
+ * action lists may change. Hence we need to fix the parent pointer of the children
+ * of root.
+ */
+static void fixup_action_list(SnapVector<action_list_t> * vec)
+{
+	for (uint i = 0;i < vec->size();i++) {
+		action_list_t * list = &(*vec)[i];
+		// NOTE(review): the address of a vector element can never be NULL,
+		// so this guard is vacuous — confirm whether a different check
+		// (e.g. on list contents) was intended.
+		if (list != NULL)
+			list->fixupParent();
+	}
+}
+
+#ifdef COLLECT_STAT
+/**
+ * @brief Tally one atomic operation, by action type, into the file-local
+ * COLLECT_STAT counters. Actions of any other type are ignored.
+ *
+ * Fix: the ATOMIC_RMW and ATOMIC_READ cases previously incremented each
+ * other's counters (RMW bumped atomic_load_count, READ bumped
+ * atomic_rmw_count), so the totals printed by print_atomic_accesses()
+ * were swapped. Counters now match their labels.
+ */
+static inline void record_atomic_stats(ModelAction * act)
+{
+	switch (act->get_type()) {
+	case ATOMIC_WRITE:
+		atomic_store_count++;
+		break;
+	case ATOMIC_RMW:
+		atomic_rmw_count++;
+		break;
+	case ATOMIC_READ:
+		atomic_load_count++;
+		break;
+	case ATOMIC_FENCE:
+		atomic_fence_count++;
+		break;
+	case ATOMIC_LOCK:
+		atomic_lock_count++;
+		break;
+	case ATOMIC_TRYLOCK:
+		atomic_trylock_count++;
+		break;
+	case ATOMIC_UNLOCK:
+		atomic_unlock_count++;
+		break;
+	case ATOMIC_NOTIFY_ONE:
+	case ATOMIC_NOTIFY_ALL:
+		atomic_notify_count++;
+		break;
+	case ATOMIC_WAIT:
+		atomic_wait_count++;
+		break;
+	case ATOMIC_TIMEDWAIT:
+		atomic_timedwait_count++;
+		break;	/* was missing: fell through to default (same effect, now explicit) */
+	default:
+		return;
+	}
+}
+
+/** @brief Print the per-type atomic operation counts collected by
+ *  record_atomic_stats(). */
+void print_atomic_accesses()
+{
+	model_print("atomic store count: %u\n", atomic_store_count);
+	model_print("atomic load count: %u\n", atomic_load_count);
+	model_print("atomic rmw count: %u\n", atomic_rmw_count);
+
+	model_print("atomic fence count: %u\n", atomic_fence_count);
+	model_print("atomic lock count: %u\n", atomic_lock_count);
+	model_print("atomic trylock count: %u\n", atomic_trylock_count);
+	model_print("atomic unlock count: %u\n", atomic_unlock_count);
+	model_print("atomic notify count: %u\n", atomic_notify_count);
+	model_print("atomic wait count: %u\n", atomic_wait_count);
+	model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
+}
+#endif
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
* @param thread The thread that we might wake up
* @return True, if we should wake up the sleeping thread; false otherwise
*/
-bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
+/**
+ * @brief Decide whether a sleeping thread's pending action should wake.
+ * Only THREAD_SLEEP and ATOMIC_WAIT/TIMEDWAIT pending actions are eligible;
+ * in both cases the fuzzer makes the final call.
+ */
+bool ModelExecution::should_wake_up(const ModelAction * asleep) const
{
-	const ModelAction *asleep = thread->get_pending();
-	/* Don't allow partial RMW to wake anyone up */
-	if (curr->is_rmwr())
-		return false;
-	/* Synchronizing actions may have been backtracked */
-	if (asleep->could_synchronize_with(curr))
-		return true;
-	/* All acquire/release fences and fence-acquire/store-release */
-	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
-		return true;
-	/* Fence-release + store can awake load-acquire on the same location */
-	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
-		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
-		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
-			return true;
-	}
	/* The sleep is literally sleeping */
+	switch (asleep->get_type()) {
+	case THREAD_SLEEP:
+		if (fuzzer->shouldWake(asleep))
+			return true;
+		break;
+	case ATOMIC_WAIT:
+	case ATOMIC_TIMEDWAIT:
+		if (fuzzer->waitShouldWakeUp(asleep))
+			return true;
+		break;
+	default:
+		/* Any other pending action type never wakes from the sleep set */
+		return false;
	}
	return false;
}
-void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
+/**
+ * @brief Scan the sleep set and wake every thread whose pending action
+ * should_wake_up() approves. Woken sleepers get their wakeup state set;
+ * woken waiters are additionally removed from the condvar waiter list,
+ * and timed waits report ETIMEDOUT to the caller.
+ */
+void ModelExecution::wake_up_sleeping_actions()
{
-	for (unsigned int i = 0;i < get_num_threads();i++) {
-		Thread *thr = get_thread(int_to_id(i));
-		if (scheduler->is_sleep_set(thr)) {
-			if (should_wake_up(curr, thr)) {
+	for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
+		thread_id_t tid = int_to_id(i);
+		if (scheduler->is_sleep_set(tid)) {
+			Thread *thr = get_thread(tid);
+			ModelAction * pending = thr->get_pending();
+			if (should_wake_up(pending)) {
				/* Remove this thread from sleep set */
				scheduler->remove_sleep(thr);
-				if (thr->get_pending()->is_sleep())
+
+				if (pending->is_sleep()) {
					thr->set_wakeup_state(true);
+				} else if (pending->is_wait()) {
+					thr->set_wakeup_state(true);
+					/* Remove this thread from list of waiters */
+					simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location());
+					for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
+						if (rit->getVal()->get_tid() == tid) {
+							waiters->erase(rit);
+							break;
+						}
+					}
+
+					/* Set ETIMEDOUT error: a timed wait woken here did not
+					   observe a notify, so it reports a timeout */
+					if (pending->is_timedwait())
+						thr->set_return_value(ETIMEDOUT);
+				}
			}
		}
	}
bool ModelExecution::is_deadlocked() const
{
bool blocking_threads = false;
- for (unsigned int i = 0;i < get_num_threads();i++) {
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
if (is_enabled(tid))
return false;
*/
bool ModelExecution::is_complete_execution() const
{
- for (unsigned int i = 0;i < get_num_threads();i++)
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++)
if (is_enabled(int_to_id(i)))
return false;
return true;
add_normal_write_to_lists(act);
add_write_to_lists(act);
w_modification_order(act);
-#ifdef NEWFUZZER
- model->get_history()->process_action(act, act->get_tid());
-#endif
return act;
}
mo_graph->addEdge((*priorset)[i], rf);
}
read_from(curr, rf);
- get_thread(curr)->set_return_value(curr->get_return_value());
+ get_thread(curr)->set_return_value(rf->get_write_value());
delete priorset;
+ //Update acquire fence clock vector
+ ClockVector * hbcv = get_hb_from_write(rf);
+ if (hbcv != NULL)
+ get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
return canprune && (curr->get_type() == ATOMIC_READ);
}
priorset->clear();
//TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
//if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
// assert_bug("Lock access before initialization");
+
+ // TODO: lock count for recursive mutexes
state->locked = get_thread(curr);
ModelAction *unlock = get_last_unlock(curr);
//synchronize with the previous unlock statement
break;
}
case ATOMIC_WAIT: {
- //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
- if (fuzzer->shouldWait(curr)) {
- /* wake up the other threads */
- for (unsigned int i = 0;i < get_num_threads();i++) {
- Thread *t = get_thread(int_to_id(i));
- Thread *curr_thrd = get_thread(curr);
- if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
- scheduler->wake(t);
- }
+ Thread *curr_thrd = get_thread(curr);
+ /* wake up the other threads */
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
+ Thread *t = get_thread(int_to_id(i));
+ if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
+ scheduler->wake(t);
+ }
+
+ /* unlock the lock - after checking who was waiting on it */
+ state->locked = NULL;
+
+ /* disable this thread */
+ simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ waiters->push_back(curr);
+ curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
- /* unlock the lock - after checking who was waiting on it */
- state->locked = NULL;
+ if (fuzzer->waitShouldFail(curr)) // If wait should fail spuriously,
+ scheduler->add_sleep(curr_thrd); // place this thread into THREAD_SLEEP_SET
+ else
+ scheduler->sleep(curr_thrd);
+
+ break;
+ }
+ case ATOMIC_TIMEDWAIT: {
+ Thread *curr_thrd = get_thread(curr);
+ if (!fuzzer->randomizeWaitTime(curr)) {
+ curr_thrd->set_return_value(ETIMEDOUT);
+ return false;
+ }
- /* disable this thread */
- get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
- scheduler->sleep(get_thread(curr));
+ /* wake up the other threads */
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
+ Thread *t = get_thread(int_to_id(i));
+ if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
+ scheduler->wake(t);
}
+ /* unlock the lock - after checking who was waiting on it */
+ state->locked = NULL;
+
+ /* disable this thread */
+ simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ waiters->push_back(curr);
+ curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
+ scheduler->add_sleep(curr_thrd);
break;
}
- case ATOMIC_TIMEDWAIT:
case ATOMIC_UNLOCK: {
- //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
- //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
- //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
- //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
- //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
- //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
- //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
- //MUST EVENMTUALLY RELEASE...
-
+ // TODO: lock count for recursive mutexes
/* wake up the other threads */
- for (unsigned int i = 0;i < get_num_threads();i++) {
+ Thread *curr_thrd = get_thread(curr);
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
- Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
scheduler->wake(t);
}
break;
}
case ATOMIC_NOTIFY_ALL: {
- action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
- scheduler->wake(get_thread(rit->getVal()));
+ Thread * thread = get_thread(rit->getVal());
+ if (thread->get_state() != THREAD_COMPLETED)
+ scheduler->wake(thread);
+ thread->set_wakeup_state(true);
}
waiters->clear();
break;
}
case ATOMIC_NOTIFY_ONE: {
- action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
if (waiters->size() != 0) {
Thread * thread = fuzzer->selectNotify(waiters);
- scheduler->wake(thread);
+ if (thread->get_state() != THREAD_COMPLETED)
+ scheduler->wake(thread);
+ thread->set_wakeup_state(true);
}
break;
}
* @param curr The ModelAction to process
* @return True if synchronization was updated
*/
-bool ModelExecution::process_fence(ModelAction *curr)
+void ModelExecution::process_fence(ModelAction *curr)
{
/*
* fence-relaxed: no-op
* sequences
* fence-seq-cst: MO constraints formed in {r,w}_modification_order
*/
- bool updated = false;
if (curr->is_acquire()) {
- action_list_t *list = &action_trace;
- sllnode<ModelAction *> * rit;
- /* Find X : is_read(X) && X --sb-> curr */
- for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
- ModelAction *act = rit->getVal();
- if (act == curr)
- continue;
- if (act->get_tid() != curr->get_tid())
- continue;
- /* Stop at the beginning of the thread */
- if (act->is_thread_start())
- break;
- /* Stop once we reach a prior fence-acquire */
- if (act->is_fence() && act->is_acquire())
- break;
- if (!act->is_read())
- continue;
- /* read-acquire will find its own release sequences */
- if (act->is_acquire())
- continue;
-
- /* Establish hypothetical release sequences */
- ClockVector *cv = get_hb_from_write(act->get_reads_from());
- if (cv != NULL && curr->get_cv()->merge(cv))
- updated = true;
- }
+ curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
}
- return updated;
}
/**
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- break; // WL: to be add (modified)
+ break;
}
case THREADONLY_FINISH:
}
/* Wake up any joining threads */
- for (unsigned int i = 0;i < get_num_threads();i++) {
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
if (waiting->waiting_on() == th &&
waiting->get_pending()->is_thread_join())
* @return a bool that indicates whether the action is enabled.
*/
bool ModelExecution::check_action_enabled(ModelAction *curr) {
- if (curr->is_lock()) {
+ switch (curr->get_type()) {
+ case ATOMIC_LOCK: {
cdsc::mutex *lock = curr->get_mutex();
struct cdsc::mutex_state *state = lock->get_state();
- if (state->locked)
+ if (state->locked) {
+ Thread *lock_owner = (Thread *)state->locked;
+ Thread *curr_thread = get_thread(curr);
+ if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
+ return true;
+ }
+
return false;
- } else if (curr->is_thread_join()) {
+ }
+ break;
+ }
+ case THREAD_JOIN:
+ case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
if (!blocking->is_complete()) {
return false;
}
- } else if (curr->is_sleep()) {
+ break;
+ }
+ case THREAD_SLEEP: {
if (!fuzzer->shouldSleep(curr))
return false;
+ break;
+ }
+ default:
+ return true;
}
return true;
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
ASSERT(curr);
- bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
bool newly_explored = initialize_curr_action(&curr);
DBG();
- wake_up_sleeping_actions(curr);
+ wake_up_sleeping_actions();
SnapVector<ModelAction *> * rf_set = NULL;
+ bool canprune = false;
/* Build may_read_from set for newly-created actions */
- if (newly_explored && curr->is_read())
+ if (curr->is_read() && newly_explored) {
rf_set = build_may_read_from(curr);
-
- bool canprune = false;
-
- if (curr->is_read() && !second_part_of_rmw) {
canprune = process_read(curr, rf_set);
delete rf_set;
} else
ASSERT(rf_set == NULL);
- /* Add the action to lists */
- if (!second_part_of_rmw)
+ /* Add the action to lists if not the second part of a rmw */
+ if (newly_explored) {
+#ifdef COLLECT_STAT
+ record_atomic_stats(curr);
+#endif
add_action_to_lists(curr, canprune);
+ }
if (curr->is_write())
add_write_to_lists(curr);
*/
bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
- SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
+ SnapVector<ModelAction *> * priorset, bool * canprune)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
ASSERT(curr->is_read());
thrd_lists->resize(priv->next_thread_id);
for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*thrd_lists)[i]) action_list_t();
+
+ fixup_action_list(thrd_lists);
}
ModelAction *prev_same_thread = NULL;
*act < *last_sc_fence_thread_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
}
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
} else {
ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
return false;
- if (!check_only)
- priorset->push_back(prevrf);
+ priorset->push_back(prevrf);
} else {
if (act->get_tid() == curr->get_tid()) {
//Can prune curr from obj list
- if (!check_only)
- *canprune = true;
+ *canprune = true;
}
}
}
}
-/**
- * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
- * some constraints. This method checks one the following constraint (others
- * require compiler support):
- *
- * If X --hb-> Y --mo-> Z, then X should not read from Z.
- * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
- */
-bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
-{
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
- unsigned int i;
- /* Iterate over all threads */
- for (i = 0;i < thrd_lists->size();i++) {
- const ModelAction *write_after_read = NULL;
-
- /* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
- sllnode<ModelAction *>* rit;
- for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
- ModelAction *act = rit->getVal();
-
- /* Don't disallow due to act == reader */
- if (!reader->happens_before(act) || reader == act)
- break;
- else if (act->is_write())
- write_after_read = act;
- else if (act->is_read() && act->get_reads_from() != NULL)
- write_after_read = act->get_reads_from();
- }
-
- if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
- return false;
- }
- return true;
-}
-
/**
* Computes the clock vector that happens before propagates from this write.
*
//operation that isn't release
if (rf->get_last_fence_release()) {
if (vec == NULL)
- vec = rf->get_last_fence_release()->get_cv();
+ vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
else
(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
+ } else {
+ if (vec == NULL) {
+ if (rf->is_rmw()) {
+ vec = new ClockVector(NULL, NULL);
+ }
+ } else {
+ vec = new ClockVector(vec, NULL);
+ }
}
rf->set_rfcv(vec);
}
{
int tid = id_to_int(act->get_tid());
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
act->setActionRef(list->add_back(act));
}
// Update action trace, a total order of all actions
- act->setTraceRef(action_trace.add_back(act));
+ action_trace.addAction(act);
// Update obj_thrd_map, a per location, per thread, order of actions
vec->resize(priv->next_thread_id);
for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
+
+ fixup_action_list(vec);
}
if (!canprune && (act->is_read() || act->is_write()))
- act->setThrdMapRef((*vec)[tid].add_back(act));
+ (*vec)[tid].addAction(act);
// Update thrd_last_action, the last action taken by each thread
if ((int)thrd_last_action.size() <= tid)
}
}
-sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
-	sllnode<ModelAction*> * rit = list->end();
-	modelclock_t next_seq = act->get_seq_number();
-	if (rit == NULL || (rit->getVal()->get_seq_number() <= next_seq))
-		return list->add_back(act);
-	else {
-		for(;rit != NULL;rit=rit->getPrev()) {
-			if (rit->getVal()->get_seq_number() <= next_seq) {
-				return list->insertAfter(rit, act);
-			}
-		}
-		return list->add_front(act);
-	}
+/**
+ * @brief Insert @a act into @a list. The manual seq-number ordered insert
+ * was dropped; actionlist::addAction is now responsible for placement.
+ */
+void insertIntoActionList(action_list_t *list, ModelAction *act) {
+	list->addAction(act);
}
-sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
-	sllnode<ModelAction*> * rit = list->end();
-	modelclock_t next_seq = act->get_seq_number();
-	if (rit == NULL) {
-		act->create_cv(NULL);
-		return list->add_back(act);
-	} else if (rit->getVal()->get_seq_number() <= next_seq) {
-		act->create_cv(rit->getVal());
-		return list->add_back(act);
-	} else {
-		for(;rit != NULL;rit=rit->getPrev()) {
-			if (rit->getVal()->get_seq_number() <= next_seq) {
-				act->create_cv(rit->getVal());
-				return list->insertAfter(rit, act);
-			}
-		}
-		return list->add_front(act);
-	}
+/**
+ * @brief Create @a act's clock vector and insert it into @a list.
+ * NOTE(review): create_cv(NULL) no longer seeds the clock vector from the
+ * list predecessor as the old code did — confirm actionlist::addAction
+ * (or the caller) now provides the equivalent ordering information.
+ */
+void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+	act->create_cv(NULL);
+	list->addAction(act);
}
/**
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
{
int tid = id_to_int(act->get_tid());
- act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
+ insertIntoActionListAndSetCV(&action_trace, act);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
vec->resize(priv->next_thread_id);
for(uint i=oldsize;i<priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
+
+ fixup_action_list(vec);
}
- act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
+ insertIntoActionList(&(*vec)[tid],act);
ModelAction * lastact = thrd_last_action[tid];
// Update thrd_last_action, the last action taken by each thrad
void ModelExecution::add_write_to_lists(ModelAction *write) {
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
+ SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
int tid = id_to_int(write->get_tid());
if (tid >= (int)vec->size()) {
uint oldsize =vec->size();
vec->resize(priv->next_thread_id);
for(uint i=oldsize;i<priv->next_thread_id;i++)
- new (&(*vec)[i]) action_list_t();
+ new (&(*vec)[i]) simple_action_list_t();
}
write->setActionRef((*vec)[tid].add_back(write));
}
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
/* All fences should have location FENCE_LOCATION */
- action_list_t *list = obj_map.get(FENCE_LOCATION);
+ simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
if (!list)
return NULL;
{
void *location = curr->get_location();
- action_list_t *list = obj_map.get(location);
+ simple_action_list_t *list = obj_map.get(location);
if (list == NULL)
return NULL;
*/
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
- SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
+ SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
unsigned int i;
ASSERT(curr->is_read());
if (thrd_lists != NULL)
for (i = 0;i < thrd_lists->size();i++) {
/* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
+ simple_action_list_t *list = &(*thrd_lists)[i];
sllnode<ModelAction *> * rit;
for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
ModelAction *act = rit->getVal();
}
+/**
+ * @brief Print the tail of the execution trace: the most recent actions
+ * in program order, plus a hash of the printed actions for quick
+ * trace comparison.
+ */
+void ModelExecution::print_tail()
+{
+	model_print("Execution trace %d:\n", get_execution_number());
+
+	sllnode<ModelAction*> *it;
+
+	model_print("------------------------------------------------------------------------------------\n");
+	model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
+	model_print("------------------------------------------------------------------------------------\n");
+
+	unsigned int hash = 0;
+
+	int length = 25;
+	int counter = 0;
+	SnapList<ModelAction *> list;
+	/* Walk backwards collecting the tail, then print front-to-back.
+	   NOTE(review): `counter > length` admits length+1 (26) actions —
+	   confirm whether exactly 25 was intended. */
+	for (it = action_trace.end();it != NULL;it = it->getPrev()) {
+		if (counter > length)
+			break;
+
+		ModelAction * act = it->getVal();
+		list.push_front(act);
+		counter++;
+	}
+
+	for (it = list.begin();it != NULL;it=it->getNext()) {
+		const ModelAction *act = it->getVal();
+		if (act->get_seq_number() > 0)
+			act->print();
+		hash = hash^(hash<<3)^(it->getVal()->hash());
+	}
+	model_print("HASH %u\n", hash);
+	model_print("------------------------------------------------------------------------------------\n");
+}
+
/**
* Add a Thread to the system for the first time. Should only be called once
* per thread.
* @return A Thread reference
*/
Thread * ModelExecution::get_pthread(pthread_t pid) {
+	// pid 1 is reserved for the main thread, pthread ids should start from 2
+	if (pid == 1)
+		return get_thread(pid);
+
	union {
		pthread_t p;
		uint32_t v;
	} x;
	x.p = pid;
	// NOTE(review): type-puns pthread_t down to its low 32 bits — assumes
	// pthread ids assigned by this model fit in uint32_t; confirm on LP64.
	uint32_t thread_id = x.v;
-	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
-	else return NULL;
+
+	if (thread_id < pthread_counter + 1)
+		return pthread_map[thread_id];
+	else
+		return NULL;
}
/**
ASSERT(curr);
/* Process this action in ModelHistory for records */
-#ifdef NEWFUZZER
- model->get_history()->process_action( curr, curr->get_tid() );
-#endif
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
void ModelExecution::removeAction(ModelAction *act) {
{
- sllnode<ModelAction *> * listref = act->getTraceRef();
- if (listref != NULL) {
- action_trace.erase(listref);
- }
+ action_trace.removeAction(act);
}
{
- sllnode<ModelAction *> * listref = act->getThrdMapRef();
- if (listref != NULL) {
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
- (*vec)[act->get_tid()].erase(listref);
- }
+ SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+ (*vec)[act->get_tid()].removeAction(act);
}
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
list->erase(listref);
}
} else if (act->is_wait()) {
} else if (act->is_free()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
+ SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
(*vec)[act->get_tid()].erase(listref);
}
+
//Clear it from last_sc_map
if (obj_last_sc_map.get(act->get_location()) == act) {
obj_last_sc_map.remove(act->get_location());
//Thread 0 isn't a real thread, so skip it..
for(unsigned int i = 1;i < thread_map.size();i++) {
Thread * t = thread_map[i];
- if (t->get_state() == THREAD_COMPLETED)
+ if (t->is_complete())
continue;
thread_id_t tid = int_to_id(i);
ClockVector * cv = get_cv(tid);
CycleNode * prevnode = node->getInEdge(i);
ModelAction * prevact = prevnode->getAction();
if (prevact->get_type() != READY_FREE) {
- // Save the original action type
- prevact->set_original_type(prevact->get_type());
prevact->set_free();
queue->push_back(prevnode);
}
if (act->is_read()) {
if (act->get_reads_from()->is_free()) {
if (act->is_rmw()) {
- // Save the original action type
- act->set_original_type(act->get_type());
//Weaken a RMW from a freed store to a write
act->set_type(ATOMIC_WRITE);
} else {
fixupLastAct(act);
}
- // Only delete this action if not being using by ModelHistory.
- // Otherwise, the deletion of action is deferred.
- if (act->get_func_ref_count() == 0) {
- delete act;
- continue;
- }
+ delete act;
+ continue;
}
}
}
if (act->is_read()) {
if (act->get_reads_from()->is_free()) {
if (act->is_rmw()) {
- // Save the original action type
- act->set_original_type(act->get_type());
act->set_type(ATOMIC_WRITE);
} else {
removeAction(act);
if (islastact) {
fixupLastAct(act);
}
- if (act->get_func_ref_count() == 0) {
- delete act;
- continue;
- }
+ delete act;
+ continue;
}
}
} else if (act->is_free()) {
if (islastact) {
fixupLastAct(act);
}
- if (act->get_func_ref_count() == 0) {
- delete act;
- continue;
- }
+ delete act;
+ continue;
} else if (act->is_write()) {
//Do nothing with write that hasn't been marked to be freed
} else if (islastact) {