X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=Robust%2Fsrc%2FRuntime%2Fmlp_lock.h;h=51cf22d3466a60a4395acd28072d994f6db1747c;hb=bdc086e2ec7fcc674a604906627b52e16fba7eb3;hp=d462534bf934c7c6ada01d29c03cfcb2bd20c1cf;hpb=0eb2cfd5d50e08dd5ed4be28bd0720bd249df68d;p=IRC.git

diff --git a/Robust/src/Runtime/mlp_lock.h b/Robust/src/Runtime/mlp_lock.h
index d462534b..51cf22d3 100644
--- a/Robust/src/Runtime/mlp_lock.h
+++ b/Robust/src/Runtime/mlp_lock.h
@@ -10,16 +10,26 @@
 #define __xg(x) ((volatile INTPTR *)(x))
 
-#define CFENCE asm volatile("":::"memory");
+#define CFENCE asm volatile ("" ::: "memory");
+#define MFENCE asm volatile ("mfence" ::: "memory");
 
 #define LOCK_PREFIX \
   ".section .smp_locks,\"a\"\n" \
   "  .align 4\n" \
-  "  .long 661f\n" /* address */\
+  "  .long 661f\n" /* address */ \
   ".previous\n" \
   "661:\n\tlock; "
 
+static inline int atomicincandread(volatile unsigned int *lock) {
+  int retval=1;
+  __asm__ __volatile__ ("lock; xadd %0,%1"
+                        : "=r" (retval)
+                        : "m" (*lock), "0" (retval)
+                        : "memory");
+  return retval;
+}
+
 static inline void atomic_dec(volatile int *v) {
   __asm__ __volatile__ (LOCK_PREFIX "decl %0"
                         : "+m" (*v));
@@ -50,67 +60,88 @@ static inline void atomic_add(int i, volatile int *v) {
                         : "ir" (i));
 }
 
-static inline int LOCKXCHG32(volatile int* ptr, int val){
+static inline int LOCKXCHG32(volatile int* ptr, int val) {
   int retval;
-  //note: xchgl always implies lock
-  __asm__ __volatile__("xchgl %0,%1"
-                       : "=r"(retval)
-                       : "m"(*ptr), "0"(val)
-                       : "memory");
+  //note: xchgl always implies lock
+  __asm__ __volatile__ ("xchgl %0,%1"
+                        : "=r" (retval)
+                        : "m" (*ptr), "0" (val)
+                        : "memory");
   return retval;
- 
 }
 
+
+// LOCKXCH atomically does the following:
+// INTPTR retval=*ptr;
+// *ptr=val;
+// return retval
 #ifdef BIT64
-static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val){
+static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val) {
   INTPTR retval;
-  //note: xchgl always implies lock
-  __asm__ __volatile__("xchgq %0,%1"
-                       : "=r"(retval)
-                       : "m"(*ptr), "0"(val)
-                       : "memory");
+  //note: xchgl always implies lock
+  __asm__ __volatile__ ("xchgq %0,%1"
+                        : "=r" (retval)
+                        : "m" (*ptr), "0" (val)
+                        : "memory");
   return retval;
- 
 }
 #else
 #define LOCKXCHG LOCKXCHG32
 #endif
 
 /*
-static inline int write_trylock(volatile int *lock) {
-  int retval=0;
-  __asm__ __volatile__("xchgl %0,%1"
-                       : "=r"(retval)
-                       : "m"(*lock), "0"(retval)
-                       : "memory");
-  return retval;
-}
-*/
+   static inline int write_trylock(volatile int *lock) {
+   int retval=0;
+   __asm__ __volatile__("xchgl %0,%1"
+                       : "=r"(retval)
+                       : "m"(*lock), "0"(retval)
+                       : "memory");
+   return retval;
+   }
+ */
 
 #ifdef BIT64
-static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new){
+static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new) {
   unsigned INTPTR prev;
-  __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                       : "=a"(prev)
-                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                       : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgq %1,%2"
+                        : "=a" (prev)
+                        : "r" (new), "m" (*__xg(ptr)), "0" (old)
+                        : "memory");
+  return prev;
+}
+
+static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new) {
+  unsigned long prev;
+  __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+                        : "=a" (prev)
+                        : "r" (new), "m" (*__xg(ptr)), "0" (old)
+                        : "memory");
   return prev;
 }
 #else
-static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new){
+static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new) {
   unsigned long prev;
-  __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                       : "=a"(prev)
-                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                       : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+                        : "=a" (prev)
+                        : "r" (new), "m" (*__xg(ptr)), "0" (old)
+                        : "memory");
   return prev;
 }
+#define CAS32 CAS
 #endif
 
-static inline int BARRIER(){
+
+static inline int BARRIER() {
   CFENCE;
   return 1;
 }
 
+static inline int MBARRIER() {
+  MFENCE;
+  return 1;
+}
+
 #endif // ____MLP_LOCK_H__