X-Git-Url: http://plrg.eecs.uci.edu/git/?p=IRC.git;a=blobdiff_plain;f=Robust%2Fsrc%2FRuntime%2Fmlp_lock.h;h=c0f1afb5680f8ea8dc1907f58cc351428d7830d2;hp=ad33a424408ffb3166d586ec660f4e528633484c;hb=eb17be02c22191b3fc7bdc335d9434ada68278de;hpb=c94e3d181dec110fc0bd7071a555631cd2a3863d

diff --git a/Robust/src/Runtime/mlp_lock.h b/Robust/src/Runtime/mlp_lock.h
index ad33a424..c0f1afb5 100644
--- a/Robust/src/Runtime/mlp_lock.h
+++ b/Robust/src/Runtime/mlp_lock.h
@@ -10,34 +10,34 @@
 
 #define __xg(x) ((volatile INTPTR *)(x))
 
-#define CFENCE asm volatile("":::"memory");
-#define MFENCE asm volatile("mfence":::"memory");
+#define CFENCE asm volatile ("" ::: "memory");
+#define MFENCE asm volatile ("mfence" ::: "memory");
 
 #define LOCK_PREFIX \
   ".section .smp_locks,\"a\"\n" \
   "  .align 4\n" \
-  "  .long 661f\n" /* address */\
+  "  .long 661f\n" /* address */ \
   ".previous\n" \
   "661:\n\tlock; "
 
 static inline int atomicincandread(volatile unsigned int *lock) {
   int retval=1;
-  __asm__ __volatile__("lock; xadd %0,%1"
-                       : "=r"(retval)
-                       : "m"(*lock), "0"(retval)
-                       : "memory");
+  __asm__ __volatile__ ("lock; xadd %0,%1"
+                        : "=r" (retval)
+                        : "m" (*lock), "0" (retval)
+                        : "memory");
   return retval;
 }
 
 static inline void atomic_dec(volatile int *v) {
   __asm__ __volatile__ (LOCK_PREFIX "decl %0"
-                       : "+m" (*v));
+                        : "+m" (*v));
 }
 
 static inline void atomic_inc(volatile int *v) {
   __asm__ __volatile__ (LOCK_PREFIX "incl %0"
-                       : "+m" (*v));
+                        : "+m" (*v));
 }
 
 // this returns TRUE if the atomic subtraction results in
@@ -48,97 +48,97 @@ static inline int atomic_sub_and_test(int i, volatile int *v) {
   unsigned char c;
 
   __asm__ __volatile__ (LOCK_PREFIX "subl %2,%0; sete %1"
-                       : "+m" (*v), "=qm" (c)
-                       : "ir" (i) : "memory");
+                        : "+m" (*v), "=qm" (c)
+                        : "ir" (i) : "memory");
   return c;
 }
 
 static inline void atomic_add(int i, volatile int *v) {
   __asm__ __volatile__ (LOCK_PREFIX "addl %1,%0"
-                       : "+m" (*v)
-                       : "ir" (i));
+                        : "+m" (*v)
+                        : "ir" (i));
 }
 
-static inline int LOCKXCHG32(volatile int* ptr, int val){
+static inline int LOCKXCHG32(volatile int* ptr, int val) {
   int retval;
-  //note: xchgl always implies lock
-  __asm__ __volatile__("xchgl %0,%1"
-                       : "=r"(retval)
-                       : "m"(*ptr), "0"(val)
-                       : "memory");
+  //note: xchgl always implies lock
+  __asm__ __volatile__ ("xchgl %0,%1"
+                        : "=r" (retval)
+                        : "m" (*ptr), "0" (val)
+                        : "memory");
   return retval;
-
+
 }
 
 // LOCKXCH atomically does the following:
-// INTPTR retval=*ptr;
-// *ptr=val;
+// INTPTR retval=*ptr;
+// *ptr=val;
 // return retval
 
 #ifdef BIT64
-static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val){
+static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val) {
   INTPTR retval;
-  //note: xchgl always implies lock
-  __asm__ __volatile__("xchgq %0,%1"
-                       : "=r"(retval)
-                       : "m"(*ptr), "0"(val)
-                       : "memory");
+  //note: xchgl always implies lock
+  __asm__ __volatile__ ("xchgq %0,%1"
+                        : "=r" (retval)
+                        : "m" (*ptr), "0" (val)
+                        : "memory");
   return retval;
-
+
 }
 #else
 #define LOCKXCHG LOCKXCHG32
 #endif
 
 /*
-static inline int write_trylock(volatile int *lock) {
-  int retval=0;
-  __asm__ __volatile__("xchgl %0,%1"
-                       : "=r"(retval)
-                       : "m"(*lock), "0"(retval)
-                       : "memory");
-  return retval;
-}
-*/
+   static inline int write_trylock(volatile int *lock) {
+   int retval=0;
+   __asm__ __volatile__("xchgl %0,%1"
+                        : "=r"(retval)
+                        : "m"(*lock), "0"(retval)
+                        : "memory");
+   return retval;
+   }
+ */
 
 #ifdef BIT64
-static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new){
+static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new) {
   unsigned INTPTR prev;
-  __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                       : "=a"(prev)
-                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                       : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgq %1,%2"
+                        : "=a" (prev)
+                        : "r" (new), "m" (*__xg(ptr)), "0" (old)
+                        : "memory");
   return prev;
 }
 
-static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new){
+static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new) {
   unsigned long prev;
-  __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                       : "=a"(prev)
-                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                       : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+                        : "=a" (prev)
+                        : "r" (new), "m" (*__xg(ptr)), "0" (old)
+                        : "memory");
   return prev;
 }
 #else
-static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new){
+static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new) {
   unsigned long prev;
-  __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                       : "=a"(prev)
-                       : "r"(new), "m"(*__xg(ptr)), "0"(old)
-                       : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+                        : "=a" (prev)
+                        : "r" (new), "m" (*__xg(ptr)), "0" (old)
+                        : "memory");
   return prev;
 }
 #define CAS32 CAS
 #endif
 
-static inline int BARRIER(){
+static inline int BARRIER() {
   CFENCE;
   return 1;
 }
 
-static inline int MBARRIER(){
+static inline int MBARRIER() {
   MFENCE;
   return 1;
 }
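
For context, a minimal usage sketch follows (not part of the diff above): a test-and-set spinlock built from the primitives this header defines. The lock word, the helper names spin_lock/spin_unlock, and the assumption that mlp_lock.h is on the include path are illustrative additions, not code from the repository.

/* Illustrative sketch only -- these helpers are NOT in mlp_lock.h. */
#include "mlp_lock.h"                /* assumed include path */

static volatile int lockword = 0;    /* hypothetical lock word: 0 = free, 1 = held */

static inline void spin_lock(volatile int *l) {
  /* xchgl is implicitly locked, so LOCKXCHG32 atomically stores 1 and
     returns the previous value; seeing 0 means this thread acquired the lock. */
  while (LOCKXCHG32(l, 1) != 0)
    ;                                /* spin until the current holder releases */
}

static inline void spin_unlock(volatile int *l) {
  MBARRIER();                        /* full fence so prior stores are visible before release */
  *l = 0;                            /* mark the lock free */
}

CAS (or CAS32) could be substituted for LOCKXCHG32 when the caller needs to observe the old value without unconditionally overwriting it, e.g. for a try-lock that fails fast instead of spinning.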