#include <algorithm>
#include <new>
#include <stdarg.h>
+#include <errno.h>
#include "model.h"
#include "execution.h"
#include "datarace.h"
#include "threads-model.h"
#include "bugmessage.h"
-#include "history.h"
#include "fuzzer.h"
-#include "newfuzzer.h"
-#define INITIAL_THREAD_ID 0
+#ifdef COLLECT_STAT
+/* Per-run tallies of executed atomic operations; incremented in
+ * record_atomic_stats() and reported by print_atomic_accesses(). */
+static unsigned int atomic_load_count = 0;
+static unsigned int atomic_store_count = 0;
+static unsigned int atomic_rmw_count = 0;
+
+static unsigned int atomic_fence_count = 0;
+static unsigned int atomic_lock_count = 0;
+static unsigned int atomic_trylock_count = 0;
+static unsigned int atomic_unlock_count = 0;
+static unsigned int atomic_notify_count = 0;
+static unsigned int atomic_wait_count = 0;
+static unsigned int atomic_timedwait_count = 0;
+#endif
/**
* Structure for holding small ModelChecker members that should be snapshotted
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
mo_graph(new CycleGraph()),
-#ifdef NEWFUZZER
- fuzzer(new NewFuzzer()),
-#else
fuzzer(new Fuzzer()),
-#endif
isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
/** @brief Destructor */
ModelExecution::~ModelExecution()
{
- for (unsigned int i = 0;i < get_num_threads();i++)
+ for (unsigned int i = INITIAL_THREAD_ID;i < get_num_threads();i++)
delete get_thread(int_to_id(i));
delete mo_graph;
* action lists may change. Hence we need to fix the parent pointer of the children
* of root.
*/
-static void fixup_action_list (SnapVector<action_list_t> * vec)
+static void fixup_action_list(SnapVector<action_list_t> * vec)
{
-	for (uint i = 0; i < vec->size(); i++) {
+	for (uint i = 0;i < vec->size();i++) {
+		/* Fix the parent pointer of each child list (see comment above) */
		action_list_t * list = &(*vec)[i];
		if (list != NULL)
			list->fixupParent();
	}
}
+#ifdef COLLECT_STAT
+/**
+ * @brief Tally one executed atomic operation into the per-type counters.
+ *
+ * Called once per newly-explored action. Action types without a dedicated
+ * counter are ignored.
+ *
+ * @param act The action whose type selects the counter to increment
+ */
+static inline void record_atomic_stats(ModelAction * act)
+{
+	switch (act->get_type()) {
+	case ATOMIC_WRITE:
+		atomic_store_count++;
+		break;
+	case ATOMIC_RMW:
+		/* Fix: was incrementing atomic_load_count (counters were swapped) */
+		atomic_rmw_count++;
+		break;
+	case ATOMIC_READ:
+		/* Fix: was incrementing atomic_rmw_count (counters were swapped) */
+		atomic_load_count++;
+		break;
+	case ATOMIC_FENCE:
+		atomic_fence_count++;
+		break;
+	case ATOMIC_LOCK:
+		atomic_lock_count++;
+		break;
+	case ATOMIC_TRYLOCK:
+		atomic_trylock_count++;
+		break;
+	case ATOMIC_UNLOCK:
+		atomic_unlock_count++;
+		break;
+	case ATOMIC_NOTIFY_ONE:
+	case ATOMIC_NOTIFY_ALL:
+		atomic_notify_count++;
+		break;
+	case ATOMIC_WAIT:
+		atomic_wait_count++;
+		break;
+	case ATOMIC_TIMEDWAIT:
+		atomic_timedwait_count++;
+		break;	// Fix: missing break (previously fell through to default)
+	default:
+		return;
+	}
+}
+
+/** @brief Print the per-type atomic operation counts collected this run. */
+void print_atomic_accesses()
+{
+	model_print("atomic store count: %u\n", atomic_store_count);
+	model_print("atomic load count: %u\n", atomic_load_count);
+	model_print("atomic rmw count: %u\n", atomic_rmw_count);
+
+	model_print("atomic fence count: %u\n", atomic_fence_count);
+	model_print("atomic lock count: %u\n", atomic_lock_count);
+	model_print("atomic trylock count: %u\n", atomic_trylock_count);
+	model_print("atomic unlock count: %u\n", atomic_unlock_count);
+	model_print("atomic notify count: %u\n", atomic_notify_count);
+	model_print("atomic wait count: %u\n", atomic_wait_count);
+	model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
+}
+#endif
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
* @param thread The thread that we might wake up
* @return True, if we should wake up the sleeping thread; false otherwise
*/
-bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
+/**
+ * @brief Decide whether a thread in the sleep set should be woken.
+ * @param asleep The sleeping thread's pending action
+ * @return True if the thread should be removed from the sleep set
+ */
+bool ModelExecution::should_wake_up(const ModelAction * asleep) const
{
-	const ModelAction *asleep = thread->get_pending();
-	/* Don't allow partial RMW to wake anyone up */
-	if (curr->is_rmwr())
-		return false;
-	/* Synchronizing actions may have been backtracked */
-	if (asleep->could_synchronize_with(curr))
-		return true;
-	/* All acquire/release fences and fence-acquire/store-release */
-	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
-		return true;
-	/* Fence-release + store can awake load-acquire on the same location */
-	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
-		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
-		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
-			return true;
-	}
	/* The sleep is literally sleeping */
+	switch (asleep->get_type()) {
+	case THREAD_SLEEP:
+		/* thrd_sleep(): defer to the fuzzer's wake-up policy */
+		if (fuzzer->shouldWake(asleep))
+			return true;
+		break;
+	case ATOMIC_WAIT:
+	case ATOMIC_TIMEDWAIT:
+		/* Condvar waits: fuzzer decides spurious wake-up / timeout */
+		if (fuzzer->waitShouldWakeUp(asleep))
+			return true;
+		break;
+	default:
+		return false;
	}
	return false;
}
-void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
+/**
+ * @brief Wake sleeping/waiting threads whose pending action may now proceed.
+ *
+ * Scans the scheduler's sleep set and consults should_wake_up() for each
+ * thread's pending action.
+ */
+void ModelExecution::wake_up_sleeping_actions()
{
-	for (unsigned int i = 0;i < get_num_threads();i++) {
-		Thread *thr = get_thread(int_to_id(i));
-		if (scheduler->is_sleep_set(thr)) {
-			if (should_wake_up(curr, thr)) {
+	for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
+		thread_id_t tid = int_to_id(i);
+		if (scheduler->is_sleep_set(tid)) {
+			Thread *thr = get_thread(tid);
+			ModelAction * pending = thr->get_pending();
+			if (should_wake_up(pending)) {
				/* Remove this thread from sleep set */
				scheduler->remove_sleep(thr);
-				if (thr->get_pending()->is_sleep())
+
+				if (pending->is_sleep()) {
+					thr->set_wakeup_state(true);
+				} else if (pending->is_wait()) {
					thr->set_wakeup_state(true);
+					/* Remove this thread from list of waiters */
+					simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location());
+					for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
+						if (rit->getVal()->get_tid() == tid) {
+							waiters->erase(rit);
+							break;
+						}
+					}
+
+					/* Set ETIMEDOUT error */
+					/* NOTE(review): only timedwaits report an error here; a
+					 * plain wait woken this way sets no return value — confirm
+					 * that is intended */
+					if (pending->is_timedwait())
+						thr->set_return_value(ETIMEDOUT);
+				}
			}
		}
	}
bool ModelExecution::is_deadlocked() const
{
bool blocking_threads = false;
- for (unsigned int i = 0;i < get_num_threads();i++) {
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
if (is_enabled(tid))
return false;
*/
bool ModelExecution::is_complete_execution() const
{
- for (unsigned int i = 0;i < get_num_threads();i++)
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++)
if (is_enabled(int_to_id(i)))
return false;
return true;
add_normal_write_to_lists(act);
add_write_to_lists(act);
w_modification_order(act);
-#ifdef NEWFUZZER
- model->get_history()->process_action(act, act->get_tid());
-#endif
return act;
}
mo_graph->addEdge((*priorset)[i], rf);
}
read_from(curr, rf);
- get_thread(curr)->set_return_value(curr->get_return_value());
+ get_thread(curr)->set_return_value(rf->get_write_value());
delete priorset;
//Update acquire fence clock vector
- ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
+ ClockVector * hbcv = get_hb_from_write(rf);
if (hbcv != NULL)
get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
return canprune && (curr->get_type() == ATOMIC_READ);
break;
}
case ATOMIC_WAIT: {
- //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
- if (fuzzer->shouldWait(curr)) {
- /* wake up the other threads */
- for (unsigned int i = 0;i < get_num_threads();i++) {
- Thread *t = get_thread(int_to_id(i));
- Thread *curr_thrd = get_thread(curr);
- if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
- scheduler->wake(t);
- }
+ Thread *curr_thrd = get_thread(curr);
+ /* wake up the other threads */
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
+ Thread *t = get_thread(int_to_id(i));
+ if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
+ scheduler->wake(t);
+ }
- /* unlock the lock - after checking who was waiting on it */
- state->locked = NULL;
+ /* unlock the lock - after checking who was waiting on it */
+ state->locked = NULL;
- /* remove old wait action and disable this thread */
- simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
- for (sllnode<ModelAction *> * it = waiters->begin(); it != NULL; it = it->getNext()) {
- ModelAction * wait = it->getVal();
- if (wait->get_tid() == curr->get_tid()) {
- waiters->erase(it);
- break;
- }
- }
+ /* disable this thread */
+ simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ waiters->push_back(curr);
+ curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
+
+ if (fuzzer->waitShouldFail(curr)) // If wait should fail spuriously,
+ scheduler->add_sleep(curr_thrd); // place this thread into THREAD_SLEEP_SET
+ else
+ scheduler->sleep(curr_thrd);
- waiters->push_back(curr);
- scheduler->sleep(get_thread(curr));
+ break;
+ }
+ case ATOMIC_TIMEDWAIT: {
+ Thread *curr_thrd = get_thread(curr);
+ if (!fuzzer->randomizeWaitTime(curr)) {
+ curr_thrd->set_return_value(ETIMEDOUT);
+ return false;
}
+ /* wake up the other threads */
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
+ Thread *t = get_thread(int_to_id(i));
+ if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
+ scheduler->wake(t);
+ }
+
+ /* unlock the lock - after checking who was waiting on it */
+ state->locked = NULL;
+
+ /* disable this thread */
+ simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
+ waiters->push_back(curr);
+ curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
+ scheduler->add_sleep(curr_thrd);
break;
}
- case ATOMIC_TIMEDWAIT:
case ATOMIC_UNLOCK: {
- //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
- //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
- //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
- //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
- //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
- //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
- //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
- //MUST EVENMTUALLY RELEASE...
-
// TODO: lock count for recursive mutexes
/* wake up the other threads */
- for (unsigned int i = 0;i < get_num_threads();i++) {
+ Thread *curr_thrd = get_thread(curr);
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
- Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
scheduler->wake(t);
}
simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
- scheduler->wake(get_thread(rit->getVal()));
+ Thread * thread = get_thread(rit->getVal());
+ if (thread->get_state() != THREAD_COMPLETED)
+ scheduler->wake(thread);
+ thread->set_wakeup_state(true);
}
waiters->clear();
break;
simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
if (waiters->size() != 0) {
Thread * thread = fuzzer->selectNotify(waiters);
- scheduler->wake(thread);
+ if (thread->get_state() != THREAD_COMPLETED)
+ scheduler->wake(thread);
+ thread->set_wakeup_state(true);
}
break;
}
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- break; // WL: to be add (modified)
+ break;
}
case THREADONLY_FINISH:
}
/* Wake up any joining threads */
- for (unsigned int i = 0;i < get_num_threads();i++) {
+ for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
if (waiting->waiting_on() == th &&
waiting->get_pending()->is_thread_join())
* @return a bool that indicates whether the action is enabled.
*/
bool ModelExecution::check_action_enabled(ModelAction *curr) {
-	if (curr->is_lock()) {
+	switch (curr->get_type()) {
+	case ATOMIC_LOCK: {
+		/* A lock acquisition is enabled only while the mutex is unlocked */
		cdsc::mutex *lock = curr->get_mutex();
		struct cdsc::mutex_state *state = lock->get_state();
		if (state->locked) {
			return false;
		}
-	} else if (curr->is_thread_join()) {
+		break;
+	}
+	case THREAD_JOIN:
+	case PTHREAD_JOIN: {
+		/* A join is enabled only once the target thread has completed */
		Thread *blocking = curr->get_thread_operand();
		if (!blocking->is_complete()) {
			return false;
		}
-	} else if (curr->is_sleep()) {
+		break;
+	}
+	case THREAD_SLEEP: {
+		/* The fuzzer decides whether this sleep actually blocks */
		if (!fuzzer->shouldSleep(curr))
			return false;
+		break;
+	}
+	default:
+		return true;
	}
	return true;
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
ASSERT(curr);
- bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
bool newly_explored = initialize_curr_action(&curr);
DBG();
- wake_up_sleeping_actions(curr);
+ wake_up_sleeping_actions();
SnapVector<ModelAction *> * rf_set = NULL;
+ bool canprune = false;
/* Build may_read_from set for newly-created actions */
- if (newly_explored && curr->is_read())
+ if (curr->is_read() && newly_explored) {
rf_set = build_may_read_from(curr);
-
- bool canprune = false;
-
- if (curr->is_read() && !second_part_of_rmw) {
canprune = process_read(curr, rf_set);
delete rf_set;
} else
ASSERT(rf_set == NULL);
- /* Add the action to lists */
- if (!second_part_of_rmw)
+ /* Add the action to lists if not the second part of a rmw */
+ if (newly_explored) {
+#ifdef COLLECT_STAT
+ record_atomic_stats(curr);
+#endif
add_action_to_lists(curr, canprune);
+ }
if (curr->is_write())
add_write_to_lists(curr);
*/
bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
- SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
+ SnapVector<ModelAction *> * priorset, bool * canprune)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
ASSERT(curr->is_read());
*act < *last_sc_fence_thread_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
}
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
} else {
ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
return false;
- if (!check_only)
- priorset->push_back(prevrf);
+ priorset->push_back(prevrf);
} else {
if (act->get_tid() == curr->get_tid()) {
//Can prune curr from obj list
- if (!check_only)
- *canprune = true;
+ *canprune = true;
}
}
}
}
-/**
- * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
- * some constraints. This method checks one the following constraint (others
- * require compiler support):
- *
- * If X --hb-> Y --mo-> Z, then X should not read from Z.
- * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
- */
-bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
-{
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
- unsigned int i;
- /* Iterate over all threads */
- for (i = 0;i < thrd_lists->size();i++) {
- const ModelAction *write_after_read = NULL;
-
- /* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
- sllnode<ModelAction *>* rit;
- for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
- ModelAction *act = rit->getVal();
-
- /* Don't disallow due to act == reader */
- if (!reader->happens_before(act) || reader == act)
- break;
- else if (act->is_write())
- write_after_read = act;
- else if (act->is_read() && act->get_reads_from() != NULL)
- write_after_read = act->get_reads_from();
- }
-
- if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
- return false;
- }
- return true;
-}
-
/**
* Computes the clock vector that happens before propagates from this write.
*
//operation that isn't release
if (rf->get_last_fence_release()) {
if (vec == NULL)
- vec = rf->get_last_fence_release()->get_cv();
+ vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
else
(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
+ } else {
+ if (vec == NULL) {
+ if (rf->is_rmw()) {
+ vec = new ClockVector(NULL, NULL);
+ }
+ } else {
+ vec = new ClockVector(vec, NULL);
+ }
}
rf->set_rfcv(vec);
}
ASSERT(curr);
/* Process this action in ModelHistory for records */
-#ifdef NEWFUZZER
- model->get_history()->process_action( curr, curr->get_tid() );
-#endif
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
//Thread 0 isn't a real thread, so skip it..
for(unsigned int i = 1;i < thread_map.size();i++) {
Thread * t = thread_map[i];
- if (t->get_state() == THREAD_COMPLETED)
+ if (t->is_complete())
continue;
thread_id_t tid = int_to_id(i);
ClockVector * cv = get_cv(tid);