#define INITIAL_THREAD_ID 0
+#ifdef COLLECT_STAT
+static unsigned int atomic_load_count = 0;	/* tally of plain atomic loads */
+static unsigned int atomic_store_count = 0;	/* tally of plain atomic stores */
+static unsigned int atomic_rmw_count = 0;	/* tally of atomic read-modify-writes */
+
+static unsigned int atomic_fence_count = 0;	/* tally of atomic fences */
+static unsigned int atomic_lock_count = 0;	/* tally of mutex lock operations */
+static unsigned int atomic_trylock_count = 0;	/* tally of mutex trylock operations */
+static unsigned int atomic_unlock_count = 0;	/* tally of mutex unlock operations */
+static unsigned int atomic_notify_count = 0;	/* tally of condvar notify_one/notify_all */
+static unsigned int atomic_wait_count = 0;	/* tally of condvar waits */
+static unsigned int atomic_timedwait_count = 0;	/* tally of condvar timed waits */
+#endif
+
/**
* Structure for holding small ModelChecker members that should be snapshotted
*/
* action lists may change. Hence we need to fix the parent pointer of the children
* of root.
*/
-static void fixup_action_list (SnapVector<action_list_t> * vec)
+static void fixup_action_list(SnapVector<action_list_t> * vec)
{
-	for (uint i = 0; i < vec->size(); i++) {
+	for (uint i = 0;i < vec->size();i++) {
		action_list_t * list = &(*vec)[i];
		if (list != NULL)
			list->fixupParent();	/* re-point children at this (possibly relocated) list */
	}
}
+#ifdef COLLECT_STAT
+/**
+ * Tally one atomic operation into the per-type statistics counters.
+ * Compiled in only under COLLECT_STAT; totals are reported by
+ * print_atomic_accesses(). Action types with no counter are ignored.
+ * @param act the action whose type is being recorded
+ */
+static inline void record_atomic_stats(ModelAction * act)
+{
+	switch (act->get_type()) {
+	case ATOMIC_WRITE:
+		atomic_store_count++;
+		break;
+	case ATOMIC_RMW:
+		atomic_rmw_count++;	/* was bumping atomic_load_count (counters were swapped) */
+		break;
+	case ATOMIC_READ:
+		atomic_load_count++;	/* was bumping atomic_rmw_count (counters were swapped) */
+		break;
+	case ATOMIC_FENCE:
+		atomic_fence_count++;
+		break;
+	case ATOMIC_LOCK:
+		atomic_lock_count++;
+		break;
+	case ATOMIC_TRYLOCK:
+		atomic_trylock_count++;
+		break;
+	case ATOMIC_UNLOCK:
+		atomic_unlock_count++;
+		break;
+	case ATOMIC_NOTIFY_ONE:
+	case ATOMIC_NOTIFY_ALL:
+		atomic_notify_count++;
+		break;
+	case ATOMIC_WAIT:
+		atomic_wait_count++;
+		break;	/* explicit break; previously fell through into default's return (harmless, but unclear) */
+	case ATOMIC_TIMEDWAIT:
+		atomic_timedwait_count++;
+		break;
+	default:
+		return;
+	}
+}
+
+void print_atomic_accesses()	/* dump the COLLECT_STAT tallies via model_print */
+{
+	model_print("atomic store count: %u\n", atomic_store_count);
+	model_print("atomic load count: %u\n", atomic_load_count);
+	model_print("atomic rmw count: %u\n", atomic_rmw_count);
+
+	model_print("atomic fence count: %u\n", atomic_fence_count);
+	model_print("atomic lock count: %u\n", atomic_lock_count);
+	model_print("atomic trylock count: %u\n", atomic_trylock_count);
+	model_print("atomic unlock count: %u\n", atomic_unlock_count);
+	model_print("atomic notify count: %u\n", atomic_notify_count);
+	model_print("atomic wait count: %u\n", atomic_wait_count);
+	model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
+}
+#endif
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
const ModelAction *asleep = thread->get_pending();
- /* Don't allow partial RMW to wake anyone up */
- if (curr->is_rmwr())
- return false;
- /* Synchronizing actions may have been backtracked */
- if (asleep->could_synchronize_with(curr))
- return true;
- /* All acquire/release fences and fence-acquire/store-release */
- if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
- return true;
- /* Fence-release + store can awake load-acquire on the same location */
- if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
- ModelAction *fence_release = get_last_fence_release(curr->get_tid());
- if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
- return true;
- }
+
/* The sleep is literally sleeping */
if (asleep->is_sleep()) {
if (fuzzer->shouldWake(asleep))
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
{
for (unsigned int i = 0;i < get_num_threads();i++) {
- Thread *thr = get_thread(int_to_id(i));
- if (scheduler->is_sleep_set(thr)) {
+ thread_id_t tid = int_to_id(i);
+ if (scheduler->is_sleep_set(tid)) {
+ Thread *thr = get_thread(tid);
if (should_wake_up(curr, thr)) {
/* Remove this thread from sleep set */
scheduler->remove_sleep(thr);
mo_graph->addEdge((*priorset)[i], rf);
}
read_from(curr, rf);
- get_thread(curr)->set_return_value(curr->get_return_value());
+ get_thread(curr)->set_return_value(rf->get_write_value());
delete priorset;
//Update acquire fence clock vector
- ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
+ ClockVector * hbcv = get_hb_from_write(rf);
if (hbcv != NULL)
get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
return canprune && (curr->get_type() == ATOMIC_READ);
case ATOMIC_WAIT: {
//TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
if (fuzzer->shouldWait(curr)) {
+ Thread *curr_thrd = get_thread(curr);
/* wake up the other threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
- Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
scheduler->wake(t);
}
/* remove old wait action and disable this thread */
simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
- for (sllnode<ModelAction *> * it = waiters->begin(); it != NULL; it = it->getNext()) {
+ for (sllnode<ModelAction *> * it = waiters->begin();it != NULL;it = it->getNext()) {
ModelAction * wait = it->getVal();
if (wait->get_tid() == curr->get_tid()) {
waiters->erase(it);
}
waiters->push_back(curr);
- scheduler->sleep(get_thread(curr));
+ scheduler->sleep(curr_thrd);
}
break;
// TODO: lock count for recursive mutexes
/* wake up the other threads */
+ Thread *curr_thrd = get_thread(curr);
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
- Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
scheduler->wake(t);
}
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- break; // WL: to be add (modified)
+ break;
}
case THREADONLY_FINISH:
* @return a bool that indicates whether the action is enabled.
*/
bool ModelExecution::check_action_enabled(ModelAction *curr) {
- if (curr->is_lock()) {
+ switch (curr->get_type()) {
+ case ATOMIC_LOCK: {
cdsc::mutex *lock = curr->get_mutex();
struct cdsc::mutex_state *state = lock->get_state();
if (state->locked) {
return false;
}
- } else if (curr->is_thread_join()) {
+ break;
+ }
+ case THREAD_JOIN:
+ case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
if (!blocking->is_complete()) {
return false;
}
- } else if (curr->is_sleep()) {
+ break;
+ }
+ case THREAD_SLEEP: {
if (!fuzzer->shouldSleep(curr))
return false;
+ break;
+ }
+ default:
+ return true;
}
return true;
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
ASSERT(curr);
- bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
bool newly_explored = initialize_curr_action(&curr);
DBG();
wake_up_sleeping_actions(curr);
SnapVector<ModelAction *> * rf_set = NULL;
+ bool canprune = false;
/* Build may_read_from set for newly-created actions */
- if (newly_explored && curr->is_read())
+ if (curr->is_read() && newly_explored) {
rf_set = build_may_read_from(curr);
-
- bool canprune = false;
-
- if (curr->is_read() && !second_part_of_rmw) {
canprune = process_read(curr, rf_set);
delete rf_set;
} else
ASSERT(rf_set == NULL);
- /* Add the action to lists */
- if (!second_part_of_rmw)
+ /* Add the action to lists if not the second part of a rmw */
+ if (newly_explored) {
+#ifdef COLLECT_STAT
+ record_atomic_stats(curr);
+#endif
add_action_to_lists(curr, canprune);
+ }
if (curr->is_write())
add_write_to_lists(curr);
*/
bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
- SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
+ SnapVector<ModelAction *> * priorset, bool * canprune)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
ASSERT(curr->is_read());
*act < *last_sc_fence_thread_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
break;
}
}
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
- if (!check_only)
- priorset->push_back(act);
+ priorset->push_back(act);
} else {
ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
return false;
- if (!check_only)
- priorset->push_back(prevrf);
+ priorset->push_back(prevrf);
} else {
if (act->get_tid() == curr->get_tid()) {
//Can prune curr from obj list
- if (!check_only)
- *canprune = true;
+ *canprune = true;
}
}
}
}
-/**
- * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
- * some constraints. This method checks one the following constraint (others
- * require compiler support):
- *
- * If X --hb-> Y --mo-> Z, then X should not read from Z.
- * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
- */
-bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
-{
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
- unsigned int i;
- /* Iterate over all threads */
- for (i = 0;i < thrd_lists->size();i++) {
- const ModelAction *write_after_read = NULL;
-
- /* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
- sllnode<ModelAction *>* rit;
- for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
- ModelAction *act = rit->getVal();
-
- /* Don't disallow due to act == reader */
- if (!reader->happens_before(act) || reader == act)
- break;
- else if (act->is_write())
- write_after_read = act;
- else if (act->is_read() && act->get_reads_from() != NULL)
- write_after_read = act->get_reads_from();
- }
-
- if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
- return false;
- }
- return true;
-}
-
/**
* Computes the clock vector that happens before propagates from this write.
*
//operation that isn't release
if (rf->get_last_fence_release()) {
if (vec == NULL)
- vec = rf->get_last_fence_release()->get_cv();
+ vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
else
(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
+ } else {
+ if (vec == NULL) {
+ if (rf->is_rmw()) {
+ vec = new ClockVector(NULL, NULL);
+ }
+ } else {
+ vec = new ClockVector(vec, NULL);
+ }
}
rf->set_rfcv(vec);
}
//Thread 0 isn't a real thread, so skip it..
for(unsigned int i = 1;i < thread_map.size();i++) {
Thread * t = thread_map[i];
- if (t->get_state() == THREAD_COMPLETED)
+ if (t->is_complete())
continue;
thread_id_t tid = int_to_id(i);
ClockVector * cv = get_cv(tid);