X-Git-Url: http://plrg.eecs.uci.edu/git/?p=c11tester.git;a=blobdiff_plain;f=pthread.cc;h=4f2dcd0ee7e00665274c239533234cb31499bc6c;hp=06e293de6e6bba07b821ea97065484363176dde9;hb=0dbf7f2dd8d6cb6ffdbb5ffaf6329e9ca4b8d913;hpb=d636a580c23766d2734cb3bca516b2f25177bae5

diff --git a/pthread.cc b/pthread.cc
index 06e293de..4f2dcd0e 100644
--- a/pthread.cc
+++ b/pthread.cc
@@ -2,7 +2,11 @@
 #include "threads-model.h"
 #include "action.h"
 #include "pthread.h"
-#include <mutex>
+
+#include "snapshot-interface.h"
+#include "datarace.h"
+
+#include "mutex.h"
 #include <condition_variable>
 #include <assert.h>
 
@@ -11,7 +15,7 @@ #include "execution.h"
 
 int pthread_create(pthread_t *t, const pthread_attr_t * attr,
-          pthread_start_t start_routine, void * arg) {
+		pthread_start_t start_routine, void * arg) {
 	struct pthread_params params = { start_routine, arg };
 
 	ModelAction *act = new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)&params);
@@ -33,53 +37,92 @@ int pthread_join(pthread_t t, void **value_ptr) {
 		// store return value
 		void *rtval = th->get_pthread_return();
 		*value_ptr = rtval;
-  }
+	}
 	return 0;
 }
 
 void pthread_exit(void *value_ptr) {
 	Thread * th = thread_current();
 	model->switch_to_master(new ModelAction(THREAD_FINISH, std::memory_order_seq_cst, th));
+	while(1) ;	// loop so the compiler's 'noreturn' warning goes away
 }
 
 int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
-	std::mutex *m = new std::mutex();
+	if (!model) {
+		model = new ModelChecker();
+	}
+
+	cdsc::mutex *m = new cdsc::mutex();
 	ModelExecution *execution = model->get_execution();
-	execution->mutex_map.put(p_mutex, m);
+	execution->getMutexMap()->put(p_mutex, m);
 	return 0;
 }
 
 int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
 	ModelExecution *execution = model->get_execution();
-	std::mutex *m = execution->mutex_map.get(p_mutex);
-	m->lock();
-	/* error message? */
+
+	/* Protect against the case where PTHREAD_MUTEX_INITIALIZER is used
+	   instead of pthread_mutex_init, or where *p_mutex is not stored
+	   in the execution->mutex_map for some reason. */
+	if (!execution->getMutexMap()->contains(p_mutex)) {
+		pthread_mutex_init(p_mutex, NULL);
+	}
+
+	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+
+	if (m != NULL) {
+		m->lock();
+	} else {
+		printf("pthread_mutex_lock: tried to lock an untracked pthread_mutex\n");
+	}
+
 	return 0;
 }
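The lazy-initialization path in pthread_mutex_lock above exists because POSIX allows programs to skip pthread_mutex_init entirely and rely on static initialization. A minimal sketch of the kind of client this guards, a hypothetical test program rather than anything in this commit:

    #include <pthread.h>
    #include <stdio.h>

    /* Statically initialized: pthread_mutex_init is never called, so the
       model checker first sees this mutex inside the pthread_mutex_lock
       wrapper, where the contains() check registers it on the fly. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter = 0;

    static void *worker(void *arg) {
        pthread_mutex_lock(&lock);      /* takes the lazy-init path */
        counter++;
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main() {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("counter = %d\n", counter);
        return 0;
    }

Without the fallback, getMutexMap()->get(p_mutex) would return NULL on the first lock, which is exactly the condition the m != NULL branch reports.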
 
+
 int pthread_mutex_trylock(pthread_mutex_t *p_mutex) {
 	ModelExecution *execution = model->get_execution();
-	std::mutex *m = execution->mutex_map.get(p_mutex);
+	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
 	return m->try_lock();
 }
-int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
+int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
 	ModelExecution *execution = model->get_execution();
-	std::mutex *m = execution->mutex_map.get(p_mutex);
-	m->unlock();
+	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+
+	if (m != NULL) {
+		m->unlock();
+	} else {
+		printf("pthread_mutex_unlock: tried to unlock an untracked pthread_mutex\n");
+	}
+
 	return 0;
 }
 
 int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
-          const struct timespec *__restrict abstime) {
-	ModelExecution *execution = model->get_execution();
-	std::mutex *m = execution->mutex_map.get(p_mutex);
-	m->lock();
+		const struct timespec *__restrict abstime) {
+// timedlock just gives the caller the option of giving up the lock, so return and let the scheduler decide which thread goes next
+
+/*
+	ModelExecution *execution = model->get_execution();
+	if (!execution->mutex_map.contains(p_mutex)) {
+		pthread_mutex_init(p_mutex, NULL);
+	}
+	cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+
+	if (m != NULL) {
+		m->lock();
+	} else {
+		printf("something is wrong with pthread_mutex_timedlock\n");
+	}
+
+	printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation, without a timeout\n");
+ */
 	return 0;
 }
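Reducing pthread_mutex_timedlock to an immediate return is a deliberate simplification: a timed lock is permitted to give up, and returning lets the scheduler pick the next thread. What a typical caller looks like against this stub, as a hypothetical sketch (try_with_deadline and the one-second deadline are illustrative, not from the commit):

    #include <pthread.h>
    #include <errno.h>
    #include <time.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    int try_with_deadline(void) {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;                /* wait at most one second */

        /* Under the stub above this always returns 0 immediately, so the
           failure branch below is never explored by the model checker. */
        int rc = pthread_mutex_timedlock(&m, &deadline);
        if (rc != 0)
            return -1;                       /* ETIMEDOUT or other failure */

        /* critical section */
        pthread_mutex_unlock(&m);
        return 0;
    }

The trade-off is that the wrapper reports success without acquiring or registering the mutex, so the timeout path is unreachable under the model and the later unlock pairs with a lock that was never taken.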
 
 pthread_t pthread_self() {
 	Thread* th = model->get_current_thread();
-	return th->get_id();
+	return (pthread_t)th->get_id();
 }
 
 int pthread_key_delete(pthread_key_t) {
@@ -88,37 +131,51 @@ int pthread_key_delete(pthread_key_t) {
 }
 
 int pthread_cond_init(pthread_cond_t *p_cond, const pthread_condattr_t *attr) {
-	std::condition_variable *v = new std::condition_variable();
+	cdsc::condition_variable *v = new cdsc::condition_variable();
 	ModelExecution *execution = model->get_execution();
-	execution->cond_map.put(p_cond, v);
+	execution->getCondMap()->put(p_cond, v);
 	return 0;
 }
 
 int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex) {
 	ModelExecution *execution = model->get_execution();
-	std::condition_variable *v = execution->cond_map.get(p_cond);
-	std::mutex *m = execution->mutex_map.get(p_mutex);
+	if ( !execution->getCondMap()->contains(p_cond) )
+		pthread_cond_init(p_cond, NULL);
+
+	cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
+	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
 
 	v->wait(*m);
 	return 0;
-	}
+}
 
-int pthread_cond_timedwait(pthread_cond_t *p_cond,
-          pthread_mutex_t *p_mutex, const struct timespec *abstime) {
+int pthread_cond_timedwait(pthread_cond_t *p_cond,
+		pthread_mutex_t *p_mutex, const struct timespec *abstime) {
+// implement cond_timedwait as a noop and let the scheduler decide which thread goes next
 	ModelExecution *execution = model->get_execution();
-	std::condition_variable *v = execution->cond_map.get(p_cond);
-	std::mutex *m = execution->mutex_map.get(p_mutex);
-	v->wait(*m);
+	if ( !execution->getCondMap()->contains(p_cond) )
+		pthread_cond_init(p_cond, NULL);
+	if ( !execution->getMutexMap()->contains(p_mutex) )
+		pthread_mutex_init(p_mutex, NULL);
+
+	cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
+	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+
+	model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
+//	v->wait(*m);
+//	printf("timed_wait called\n");
 	return 0;
 }
 
 int pthread_cond_signal(pthread_cond_t *p_cond) {
 	// notify only one blocked thread
 	ModelExecution *execution = model->get_execution();
-	std::condition_variable *v = execution->cond_map.get(p_cond);
+	if ( !execution->getCondMap()->contains(p_cond) )
+		pthread_cond_init(p_cond, NULL);
+
+	cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
 	v->notify_one();
 	return 0;
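pthread_cond_wait, unlike the timed variants, is modeled faithfully: it forwards to cdsc::condition_variable::wait on the mapped mutex, after lazily registering a statically initialized condition variable. A hypothetical test program exercising these wrappers end to end (producer and ready are illustrative names, not from the commit):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    static int ready = 0;

    static void *producer(void *arg) {
        pthread_mutex_lock(&m);          /* lazily registers m */
        ready = 1;
        pthread_cond_signal(&c);         /* lazily registers c if needed */
        pthread_mutex_unlock(&m);
        return NULL;
    }

    int main() {
        pthread_t t;
        pthread_create(&t, NULL, producer, NULL);

        pthread_mutex_lock(&m);
        while (!ready)
            pthread_cond_wait(&c, &m);   /* real wait: v->wait(*m) */
        pthread_mutex_unlock(&m);

        pthread_join(t, NULL);
        printf("ready = %d\n", ready);
        return 0;
    }

Had the waiter used pthread_cond_timedwait instead, the NOOP modeling above would return immediately with the mutex still held, behaving like an instant timeout that is reported as success.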