Implement both pthread_yield and C++ thread yield
[c11tester.git] / pthread.cc
index 63060d898eb2af5c8db5c995dd24a7c5f1e63710..4b5e3d37963dc3887bcfa24d764375c4d479f654 100644 (file)
 /* global "model" object */
 #include "model.h"
 #include "execution.h"
-extern "C" {
-int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
-}
-
-int nanosleep(const struct timespec *rqtp, struct timespec *rmtp) {
-       if (model) {
-               uint64_t time = rqtp->tv_sec * 1000000000 + rqtp->tv_nsec;
-               struct timespec currtime;
-               clock_gettime(CLOCK_MONOTONIC, &currtime);
-               uint64_t lcurrtime = currtime.tv_sec * 1000000000 + currtime.tv_nsec;
-               model->switch_to_master(new ModelAction(THREAD_SLEEP, std::memory_order_seq_cst, time, lcurrtime));
-               if (rmtp != NULL) {
-                       clock_gettime(CLOCK_MONOTONIC, &currtime);
-                       uint64_t lendtime = currtime.tv_sec * 1000000000 + currtime.tv_nsec;
-                       uint64_t elapsed = lendtime - lcurrtime;
-                       rmtp->tv_sec = elapsed / 1000000000;
-                       rmtp->tv_nsec = elapsed - rmtp->tv_sec * 1000000000;
-               }
-       }
-       return 0;
-}
+#include <errno.h>
 
 int pthread_create(pthread_t *t, const pthread_attr_t * attr,
-                                                                        pthread_start_t start_routine, void * arg) {
+        pthread_start_t start_routine, void * arg) {
        if (!model) {
                snapshot_system_init(10000, 1024, 1024, 40000);
                model = new ModelChecker();
@@ -72,6 +52,12 @@ int pthread_detach(pthread_t t) {
        return 0;
 }
 
+/* Take care of both pthread_yield and C++ thread yield */
+int sched_yield() {
+       model->switch_to_master(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
+       return 0;
+}
+
 void pthread_exit(void *value_ptr) {
        Thread * th = thread_current();
        th->set_pthread_return(value_ptr);
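
The sched_yield interposer added above is what makes the commit title hold: on glibc, pthread_yield is a thin wrapper around sched_yield, and libstdc++'s std::this_thread::yield typically lowers to sched_yield as well, so intercepting that one symbol covers both APIs. A minimal sketch of a caller exercising both paths, assuming the checker's runtime is interposed ahead of libc (the test program itself is illustrative, not part of this patch):

#include <pthread.h>
#include <sched.h>
#include <atomic>
#include <thread>

static std::atomic<int> ready(0);

static void *spinner(void *)
{
	/* Spin until main() signals us; yielding lets the interposed
	   scheduler pick another thread instead of busy-waiting. */
	while (ready.load(std::memory_order_acquire) == 0)
		std::this_thread::yield();	/* routed through sched_yield() */
	return NULL;
}

int main()
{
	pthread_t t;
	pthread_create(&t, NULL, spinner, NULL);
	sched_yield();				/* same path pthread_yield takes */
	ready.store(1, std::memory_order_release);
	pthread_join(t, NULL);
	return 0;
}
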
@@ -115,7 +101,7 @@ int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
        if (m != NULL) {
                m->lock();
        } else {
-               printf("ah\n");
+               return 1;
        }
 
        return 0;
@@ -140,31 +126,39 @@ int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
                m->unlock();
        } else {
                printf("try to unlock an untracked pthread_mutex\n");
+               return 1;
        }
 
        return 0;
 }
 
 int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
-                                                                                                                const struct timespec *__restrict abstime) {
+        const struct timespec *__restrict abstime) {
// timedlock only adds the option of giving up after the timeout; acquiring the mutex like a normal lock is one legal outcome, so model it as a plain lock and let the scheduler decide which thread runs next
 
-/*
-        ModelExecution *execution = model->get_execution();
-        if (!execution->mutex_map.contains(p_mutex)) {
-                pthread_mutex_init(p_mutex, NULL);
-        }
-        cdsc::snapmutex *m = execution->mutex_map.get(p_mutex);
-
-        if (m != NULL) {
-                m->lock();
-        } else {
-                printf("something is wrong with pthread_mutex_timedlock\n");
-        }
-
-        printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation without no timeout\n");
- */
-       return 0;
+       if (!model) {
+               snapshot_system_init(10000, 1024, 1024, 40000);
+               model = new ModelChecker();
+               model->startChecker();
+       }
+
+       ModelExecution *execution = model->get_execution();
+
+	/* Handle the case where PTHREAD_MUTEX_INITIALIZER is used
+	   instead of pthread_mutex_init, or where *p_mutex is not stored
+	   in the execution->mutex_map for some reason. */
+       if (!execution->getMutexMap()->contains(p_mutex)) {
+               pthread_mutex_init(p_mutex, NULL);
+       }
+
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
+
+       if (m != NULL) {
+               m->lock();
+               return 0;
+       }
+
+       return 1;
 }
 
 pthread_t pthread_self() {
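
The rewritten pthread_mutex_timedlock above treats the deadline as advisory: acquiring the mutex exactly like pthread_mutex_lock is one legal outcome, and the lazy pthread_mutex_init call covers mutexes created with PTHREAD_MUTEX_INITIALIZER that never reach the init interposer. A hedged sketch of the kind of caller this now supports; under the checker the timeout is simply not enforced:

#include <pthread.h>
#include <time.h>

/* Statically initialized: pthread_mutex_init is never called, so the
   lazy-init branch in the interposer has to register it. */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void locked_work(void)
{
	struct timespec abstime;
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 1;			/* nominal one-second deadline */

	/* Behaves like pthread_mutex_lock under the checker: returns 0 once
	   the lock is held, and nonzero only if the mutex cannot be tracked. */
	if (pthread_mutex_timedlock(&m, &abstime) == 0) {
		/* ... critical section ... */
		pthread_mutex_unlock(&m);
	}
}
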
@@ -189,6 +183,8 @@ int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex) {
        ModelExecution *execution = model->get_execution();
        if ( !execution->getCondMap()->contains(p_cond) )
                pthread_cond_init(p_cond, NULL);
+       if ( !execution->getMutexMap()->contains(p_mutex) )
+               pthread_mutex_init(p_mutex, NULL);
 
        cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
        cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
@@ -198,8 +194,7 @@ int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex) {
 }
 
 int pthread_cond_timedwait(pthread_cond_t *p_cond,
-                                                                                                        pthread_mutex_t *p_mutex, const struct timespec *abstime) {
-// implement cond_timedwait as a noop and let the scheduler decide which thread goes next
+       pthread_mutex_t *p_mutex, const struct timespec *abstime) {
        ModelExecution *execution = model->get_execution();
 
        if ( !execution->getCondMap()->contains(p_cond) )
@@ -208,11 +203,12 @@ int pthread_cond_timedwait(pthread_cond_t *p_cond,
                pthread_mutex_init(p_mutex, NULL);
 
        cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
-//     cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
+       cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
+
+       model->switch_to_master(new ModelAction(ATOMIC_TIMEDWAIT, std::memory_order_seq_cst, v, (uint64_t) m));
+       m->lock();
 
-       model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
-//     v->wait(*m);
-//     printf("timed_wait called\n");
+       // model_print("Timed_wait is called\n");
        return 0;
 }
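
The new pthread_cond_timedwait models the wait with an ATOMIC_TIMEDWAIT action and then re-acquires the mutex before returning, matching the POSIX contract that the caller owns the mutex again whether the wait was signalled or timed out. That is what keeps the standard wait loop correct without modification; a minimal sketch, assuming a predicate guarded by the same mutex:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done = false;

static void wait_for_done(void)
{
	struct timespec deadline;
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;

	pthread_mutex_lock(&lock);
	while (!done) {
		/* On return the mutex is held again, so re-checking the
		   predicate is safe; a nonzero return would signal a timeout,
		   though the interposed implementation always returns 0. */
		if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0)
			break;
	}
	pthread_mutex_unlock(&lock);
}
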