#define __xg(x) ((volatile INTPTR *)(x))
-#define CFENCE asm volatile("":::"memory");
+#define CFENCE asm volatile ("" ::: "memory");
+#define MFENCE asm volatile ("mfence" ::: "memory");
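+// CFENCE is a compiler-only barrier (no instruction emitted); MFENCE also
+// emits an mfence instruction so loads and stores are ordered at the hardware level.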
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 4\n" \
- " .long 661f\n" /* address */\
+ " .long 661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
+static inline int atomicincandread(volatile unsigned int *lock) {
+ int retval=1;
+ __asm__ __volatile__ ("lock; xadd %0,%1"
+ : "=r" (retval)
+ : "m" (*lock), "0" (retval)
+ : "memory");
+ return retval;
+}
+
static inline void atomic_dec(volatile int *v) {
__asm__ __volatile__ (LOCK_PREFIX "decl %0"
: "+m" (*v));
: "ir" (i));
}
-static inline int LOCKXCHG32(volatile int* ptr, int val){
+static inline int LOCKXCHG32(volatile int* ptr, int val) {
int retval;
- //note: xchgl always implies lock
- __asm__ __volatile__("xchgl %0,%1"
- : "=r"(retval)
- : "m"(*ptr), "0"(val)
- : "memory");
+ //note: xchgl always implies lock
+ __asm__ __volatile__ ("xchgl %0,%1"
+ : "=r" (retval)
+ : "m" (*ptr), "0" (val)
+ : "memory");
return retval;
-
+
}
+
+// LOCKXCHG atomically does the following:
+// INTPTR retval=*ptr;
+// *ptr=val;
+// return retval;
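+// A typical (hypothetical) use is claiming a flag exactly once:
+//   if (LOCKXCHG(&flag, 1) == 0) { /* this thread claimed the flag */ }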
#ifdef BIT64
-static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val){
+static inline INTPTR LOCKXCHG(volatile INTPTR * ptr, INTPTR val) {
INTPTR retval;
- //note: xchgl always implies lock
- __asm__ __volatile__("xchgq %0,%1"
- : "=r"(retval)
- : "m"(*ptr), "0"(val)
- : "memory");
+  //note: xchgq always implies lock
+ __asm__ __volatile__ ("xchgq %0,%1"
+ : "=r" (retval)
+ : "m" (*ptr), "0" (val)
+ : "memory");
return retval;
-
+
}
#else
#define LOCKXCHG LOCKXCHG32
#endif
/*
-static inline int write_trylock(volatile int *lock) {
- int retval=0;
- __asm__ __volatile__("xchgl %0,%1"
- : "=r"(retval)
- : "m"(*lock), "0"(retval)
- : "memory");
- return retval;
-}
-*/
+ static inline int write_trylock(volatile int *lock) {
+ int retval=0;
+ __asm__ __volatile__("xchgl %0,%1"
+ : "=r"(retval)
+ : "m"(*lock), "0"(retval)
+ : "memory");
+ return retval;
+ }
+ */
#ifdef BIT64
-static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new){
+static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new) {
unsigned INTPTR prev;
- __asm__ __volatile__("lock; cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgq %1,%2"
+ : "=a" (prev)
+ : "r" (new), "m" (*__xg(ptr)), "0" (old)
+ : "memory");
+ return prev;
+}
+
+static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new) {
+ unsigned long prev;
+ __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+ : "=a" (prev)
+ : "r" (new), "m" (*__xg(ptr)), "0" (old)
+ : "memory");
return prev;
}
#else
-static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new){
+static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new) {
unsigned long prev;
- __asm__ __volatile__("lock; cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgl %k1,%2"
+ : "=a" (prev)
+ : "r" (new), "m" (*__xg(ptr)), "0" (old)
+ : "memory");
return prev;
}
+#define CAS32 CAS
#endif
-static inline int BARRIER(){
+
+static inline int BARRIER() {
CFENCE;
return 1;
}
+static inline int MBARRIER() {
+ MFENCE;
+ return 1;
+}
+
#endif // ____MLP_LOCK_H__