#include "cyclegraph.h"
#include "promise.h"
#include "datarace.h"
+#include "mutex.h"
#define INITIAL_THREAD_ID 0
action_trace(new action_list_t()),
thread_map(new HashTable<int, Thread *, int>()),
obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
+ lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
promises(new std::vector<Promise *>()),
futurevalues(new std::vector<struct PendingFutureValue>()),
- lazy_sync_with_release(new HashTable<void *, std::list<ModelAction *>, uintptr_t, 4>()),
+ lazy_sync_with_release(new HashTable<void *, action_list_t, uintptr_t, 4>()),
thrd_last_action(new std::vector<ModelAction *>(1)),
node_stack(new NodeStack()),
mo_graph(new CycleGraph()),
delete obj_thrd_map;
delete obj_map;
+ delete lock_waiters_map;
delete action_trace;
for (unsigned int i = 0; i < promises->size(); i++)
{
thread_id_t tid;
- /* Do not split atomic actions. */
- if (curr->is_rmwr())
- return thread_current();
- /* The THREAD_CREATE action points to the created Thread */
- else if (curr->get_type() == THREAD_CREATE)
- return (Thread *)curr->get_location();
+ if (curr!=NULL) {
+ /* Do not split atomic actions. */
+ if (curr->is_rmwr())
+ return thread_current();
+ /* The THREAD_CREATE action points to the created Thread */
+ else if (curr->get_type() == THREAD_CREATE)
+ return (Thread *)curr->get_location();
+ }
/* Have we completed exploring the preselected path? */
if (diverge == NULL)
{
action_type type = act->get_type();
- switch (type) {
- case ATOMIC_READ:
- case ATOMIC_WRITE:
- case ATOMIC_RMW:
- break;
- default:
- return NULL;
- }
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map->get_safe_ptr(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->is_synchronizing(prev))
- return prev;
+ if (type==ATOMIC_READ||type==ATOMIC_WRITE||type==ATOMIC_RMW) {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (act->is_synchronizing(prev))
+ return prev;
+ }
+ } else if (type==ATOMIC_LOCK||type==ATOMIC_TRYLOCK) {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (prev->is_success_lock())
+ return prev;
+ }
+ } else if (type==ATOMIC_UNLOCK) {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (prev->is_failed_trylock())
+ return prev;
+ }
}
return NULL;
}
}
}
+/**
+ * Process a mutex operation (lock / trylock / unlock) ModelAction.
+ *
+ * TRYLOCK: succeeds iff the mutex state is not currently locked; the
+ * outcome is recorded on the action and as the thread's return value
+ * (1 = acquired, 0 = failed). A successful trylock falls through to
+ * the LOCK case.
+ * LOCK: asserts a bug if the lock is accessed before its
+ * initialization (vector-clock check against the allocating thread),
+ * marks the mutex locked, and synchronizes with the most recent
+ * unlock on this mutex (if any).
+ * UNLOCK: marks the mutex unlocked and re-enables every thread queued
+ * in lock_waiters_map for this mutex location, then clears the queue.
+ *
+ * @param curr The mutex operation (ATOMIC_LOCK, ATOMIC_TRYLOCK, or
+ * ATOMIC_UNLOCK) to process
+ */
+void ModelChecker::process_mutex(ModelAction *curr) {
+	std::mutex * mutex=(std::mutex *) curr->get_location();
+	struct std::mutex_state * state=mutex->get_state();
+	switch(curr->get_type()) {
+	case ATOMIC_TRYLOCK: {
+		bool success=!state->islocked;
+		curr->set_try_lock(success);
+		if (!success) {
+			get_thread(curr)->set_return_value(0);
+			break;
+		}
+		get_thread(curr)->set_return_value(1);
+	}
+		//otherwise fall into the lock case
+	case ATOMIC_LOCK: {
+		if (curr->get_cv()->getClock(state->alloc_tid)<=state->alloc_clock) {
+			printf("Lock access before initialization\n");
+			set_assert();
+		}
+		state->islocked=true;
+		ModelAction *unlock=get_last_unlock(curr);
+		//synchronize with the previous unlock statement
+		if ( unlock != NULL )
+			curr->synchronize_with(unlock);
+		break;
+	}
+	case ATOMIC_UNLOCK: {
+		//unlock the lock
+		state->islocked=false;
+		//wake up the other threads
+		action_list_t * waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
+		//activate all the waiting threads
+		for(action_list_t::iterator rit = waiters->begin(); rit!=waiters->end(); rit++) {
+			add_thread(get_thread((*rit)->get_tid()));
+		}
+		waiters->clear();
+		break;
+	}
+	default:
+		ASSERT(0);
+	}
+}
+
+
/**
* Process a write ModelAction
* @param curr The ModelAction to process
return newcurr;
}
- newcurr = node_stack->explore_action(curr);
+ newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
if (newcurr) {
/* First restore type and order in case of RMW operation */
if (curr->is_rmwr())
newcurr->copy_typeandorder(curr);
+ ASSERT(curr->get_location()==newcurr->get_location());
+
/* Discard duplicate ModelAction; use action from NodeStack */
delete curr;
return newcurr;
}
+/**
+ * Check whether the current pending action can execute now. A lock
+ * acquire on a mutex that is already held cannot proceed: the action
+ * is queued in lock_waiters_map (keyed by the mutex address) so that
+ * the matching unlock can later re-enable the waiting thread.
+ *
+ * @param curr The action about to execute
+ * @return false if the action must wait (a lock on a held mutex);
+ * true otherwise
+ */
+bool ModelChecker::check_action_enabled(ModelAction *curr) {
+	if (curr->is_lock()) {
+		std::mutex * lock=(std::mutex *) curr->get_location();
+		struct std::mutex_state * state = lock->get_state();
+		if (state->islocked) {
+			//Stick the action in the appropriate waiting queue
+			lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
+			return false;
+		}
+	}
+
+	return true;
+}
+
/**
* This is the heart of the model checker routine. It performs model-checking
* actions corresponding to a given "current action." Among other processes, it
bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
+ if (!check_action_enabled(curr)) {
+ //we'll make the execution look like we chose to run this action
+	//much later...when a lock is actually available to release
+ get_current_thread()->set_pending(curr);
+ remove_thread(get_current_thread());
+ return get_next_thread(NULL);
+ }
+
ModelAction *newcurr = initialize_curr_action(curr);
/* Add the action to lists before any other model-checking tasks */
build_reads_from_past(curr);
curr = newcurr;
+ /* Add the action to lists before any other model-checking tasks */
+ if (!second_part_of_rmw)
+ add_action_to_lists(newcurr);
+
+ /* Build may_read_from set for newly-created actions */
+ if (curr == newcurr && curr->is_read())
+ build_reads_from_past(curr);
+ curr = newcurr;
+
/* Thread specific actions */
switch (curr->get_type()) {
case THREAD_CREATE: {
if (act->is_write() && process_write(act))
updated = true;
+ if (act->is_mutex_op())
+ process_mutex(act);
+
if (updated)
work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
break;
complete = release_seq_head(rf, release_heads);
if (!complete) {
/* add act to 'lazy checking' list */
- std::list<ModelAction *> *list;
+ action_list_t *list;
list = lazy_sync_with_release->get_safe_ptr(act->get_location());
list->push_back(act);
(*lazy_sync_size)++;
*/
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
- std::list<ModelAction *> *list;
+ action_list_t *list;
list = lazy_sync_with_release->getptr(location);
if (!list)
return false;
bool updated = false;
- std::list<ModelAction *>::iterator it = list->begin();
+ action_list_t::iterator it = list->begin();
while (it != list->end()) {
ModelAction *act = *it;
const ModelAction *rf = act->get_reads_from();
/**
 * Gets the most recent action performed by a given thread.
 * @param tid The thread whose last action is requested
 * @return The thread's last recorded action, or NULL if the thread
 * has not recorded one yet
 */
ModelAction * ModelChecker::get_last_action(thread_id_t tid)
{
-	int nthreads = get_num_threads();
-	if ((int)thrd_last_action->size() < nthreads)
-		thrd_last_action->resize(nthreads);
-	return (*thrd_last_action)[id_to_int(tid)];
+	int threadid=id_to_int(tid);
+	/* Return NULL (rather than resizing the vector) for threads that
+	 * have not yet recorded an action */
+	if (threadid<(int)thrd_last_action->size())
+		return (*thrd_last_action)[id_to_int(tid)];
+	else
+		return NULL;
}
/**
return NULL;
}
+/**
+ * Gets the most recent unlock operation on the same mutex as the
+ * given action.
+ * @param curr The current mutex action; its location identifies the
+ * mutex to search for
+ * @return The most recent ATOMIC_UNLOCK on this mutex, or NULL if
+ * none exists
+ */
+ModelAction * ModelChecker::get_last_unlock(ModelAction *curr)
+{
+	void *location = curr->get_location();
+	action_list_t *list = obj_map->get_safe_ptr(location);
+	/* linear search: from most recent to oldest, for the last unlock */
+	action_list_t::reverse_iterator rit;
+	for (rit = list->rbegin(); rit != list->rend(); rit++)
+		if ((*rit)->is_unlock())
+			return *rit;
+	return NULL;
+}
+
ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
{
ModelAction *parent = get_last_action(tid);
* @return Returns true (success) if a step was taken and false otherwise.
*/
bool ModelChecker::take_step() {
-	Thread *curr, *next;
-
	if (has_asserted())
		return false;
-	curr = thread_current();
+	/* declare at first use instead of up front */
+	Thread * curr = thread_current();
	if (curr) {
		if (curr->get_state() == THREAD_READY) {
			ASSERT(priv->current_action);
			priv->nextThread = check_current_action(priv->current_action);
			priv->current_action = NULL;
-			if (!curr->is_blocked() && !curr->is_complete())
-				scheduler->add_thread(curr);
+			/* Condition inverted: threads now stay enqueued in the
+			 * scheduler; remove only when blocked or complete */
+			if (curr->is_blocked() || curr->is_complete())
+				scheduler->remove_thread(curr);
		} else {
			ASSERT(false);
		}
	}
-	next = scheduler->next_thread(priv->nextThread);
+	Thread * next = scheduler->next_thread(priv->nextThread);
	/* Infeasible -> don't take any more steps */
	if (!isfeasible())
	/* NOTE(review): no statement visibly follows the isfeasible()
	 * check in this chunk -- presumably a 'return false;' exists in
	 * the elided context; verify against the full file */
	/* next == NULL -> don't take any more steps */
	if (!next)
		return false;
+
+	if ( next->get_pending() != NULL ) {
+		/* restart an action that was deferred (e.g., a lock acquire
+		 * whose mutex was held) -- see check_action_enabled() */
+		set_current_action(next->get_pending());
+		next->set_pending(NULL);
+		next->set_state(THREAD_READY);
+		return true;
+	}
+
	/* Return false only if swap fails with an error */
	return (Thread::swap(&system_context, next) == 0);
}