#ifndef ____MLP_LOCK_H__
#define ____MLP_LOCK_H__

// INTPTR is assumed to be provided by the surrounding runtime headers as an
// integer type (or a macro naming one) wide enough to hold a pointer.
#define __xg(x) ((volatile INTPTR *)(x))

// CFENCE is a compiler-only barrier; MFENCE additionally emits a full
// hardware memory fence.
#define CFENCE asm volatile("":::"memory");
#define MFENCE asm volatile("mfence":::"memory");

// Record each "lock" prefix in the .smp_locks section, following the
// Linux-kernel convention that lets lock prefixes be located and patched
// at runtime.
#define LOCK_PREFIX \
  ".section .smp_locks,\"a\"\n" \
  "  .align 4\n" \
  "  .long 661f\n" /* address */ \
  ".previous\n" \
  "661:\n\tlock; "

// Atomically increments *lock and returns its value from before the
// increment (a fetch-and-add built on "lock; xadd").
static inline int atomicincandread(volatile unsigned int *lock) {
  int retval = 1;
  __asm__ __volatile__("lock; xadd %0,%1"
                       : "=r" (retval)
                       : "m" (*lock), "0" (retval)
                       : "memory");
  return retval;
}

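// Usage sketch (illustrative, not part of the original header): because
// atomicincandread is a fetch-and-add, it can hand out distinct ticket
// numbers to concurrent callers. The example_* names are hypothetical.
static volatile unsigned int example_next_ticket = 1;

static inline unsigned int example_take_ticket(void) {
  // Each caller sees the counter's value from before its own increment,
  // so no two callers receive the same ticket.
  return (unsigned int) atomicincandread(&example_next_ticket);
}
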
// Atomically decrements *v.
static inline void atomic_dec(volatile int *v) {
  __asm__ __volatile__ (LOCK_PREFIX "decl %0"
                        : "+m" (*v));
}

// Atomically increments *v.
static inline void atomic_inc(volatile int *v) {
  __asm__ __volatile__ (LOCK_PREFIX "incl %0"
                        : "+m" (*v));
}

// Subtracts i from *v and returns TRUE iff the result is zero. Folding the
// test into the locked instruction matters: if two threads decremented and
// then separately re-read the value, both could observe zero and each
// conclude it was the last decrementer.
static inline int atomic_sub_and_test(int i, volatile int *v) {
  unsigned char c;
  __asm__ __volatile__ (LOCK_PREFIX "subl %2,%0; sete %1"
                        : "+m" (*v), "=qm" (c)
                        : "ir" (i) : "memory");
  return c;
}

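// Usage sketch (illustrative, not part of the original header): the classic
// client of atomic_sub_and_test is reference counting, where exactly one
// thread must see the count reach zero and reclaim the object. The
// example_* names are hypothetical.
typedef struct example_refcounted {
  volatile int refcount;   // number of outstanding references
} example_refcounted;

static inline int example_release(example_refcounted *obj) {
  // TRUE for exactly one caller: the one whose decrement hit zero. Only
  // that caller may reclaim the object.
  return atomic_sub_and_test(1, &obj->refcount);
}
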
// Atomically adds i to *v.
static inline void atomic_add(int i, volatile int *v) {
  __asm__ __volatile__ (LOCK_PREFIX "addl %1,%0"
                        : "+m" (*v)
                        : "ir" (i));
}

// Atomically exchanges *ptr and val; returns the previous value of *ptr.
static inline int LOCKXCHG32(volatile int *ptr, int val) {
  int retval;
  // note: xchg on a memory operand always implies lock
  __asm__ __volatile__("xchgl %0,%1"
                       : "=r" (retval)
                       : "m" (*ptr), "0" (val)
                       : "memory");
  return retval;
}

// LOCKXCHG atomically does the following:
//   INTPTR retval = *ptr;
//   *ptr = val;
//   return retval;
#ifdef BIT64 /* word-size guard; macro name assumed from the surrounding runtime */
static inline INTPTR LOCKXCHG(volatile INTPTR *ptr, INTPTR val) {
  INTPTR retval;
  // note: xchg on a memory operand always implies lock
  __asm__ __volatile__("xchgq %0,%1"
                       : "=r" (retval)
                       : "m" (*ptr), "0" (val)
                       : "memory");
  return retval;
}
#else
#define LOCKXCHG LOCKXCHG32
#endif

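// Usage sketch (illustrative, not part of the original header): an atomic
// exchange is enough to build a test-and-set spinlock. In this sketch 0
// means free and 1 means held; the example_* names are hypothetical.
static inline void example_spin_lock(volatile int *lock) {
  // Keep swapping 1 in until the previous value was 0, i.e. we took a
  // free lock.
  while (LOCKXCHG32(lock, 1) != 0) {
    /* spin; real code would add a pause/backoff here */
  }
}

static inline void example_spin_unlock(volatile int *lock) {
  CFENCE;      // keep the critical section's accesses above the release
  *lock = 0;   // x86 stores are not reordered with earlier stores
}
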
// Exchanges 0 into *lock and returns the previous contents. Under the
// convention this implies (a free lock holds a nonzero value), a nonzero
// return means the caller has just acquired the lock.
static inline int write_trylock(volatile int *lock) {
  int retval = 0;  // value swapped into the lock word
  __asm__ __volatile__("xchgl %0,%1"
                       : "=r" (retval)
                       : "m" (*lock), "0" (retval)
                       : "memory");
  return retval;
}

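// Usage sketch (illustrative, not part of the original header): spinning on
// write_trylock until the exchange observes a free (nonzero) lock word.
// The example_* name is hypothetical.
static inline void example_write_lock(volatile int *lock) {
  while (!write_trylock(lock)) {
    /* spin; the lock word stays 0 while another thread holds it */
  }
}
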
// Compare-and-swap: if *ptr == old, store new into *ptr. Returns the value
// read from *ptr, which equals old exactly when the swap happened.
#ifdef BIT64
static inline INTPTR CAS(volatile void *ptr, unsigned INTPTR old, unsigned INTPTR new) {
  unsigned INTPTR prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg(ptr)), "0" (old)
                       : "memory");
  return prev;
}

// 32-bit variant; %k1 selects the 32-bit name of the register holding new.
static inline long CAS32(volatile void *ptr, unsigned long old, unsigned long new) {
  unsigned long prev;
  __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg(ptr)), "0" (old)
                       : "memory");
  return prev;
}
#else
static inline long CAS(volatile void *ptr, unsigned long old, unsigned long new) {
  unsigned long prev;
  __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg(ptr)), "0" (old)
                       : "memory");
  return prev;
}
#define CAS32 CAS
#endif

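// Usage sketch (illustrative, not part of the original header): the usual
// CAS retry loop, shown here as an atomic maximum on a word-sized value.
// The example_* name is hypothetical.
static inline void example_atomic_max(volatile INTPTR *target, INTPTR val) {
  INTPTR old = *target;
  while (old < val) {
    // CAS returns the value it found; if that is not the value we read,
    // another thread won the race and we retry against the fresh value.
    INTPTR found = (INTPTR) CAS(target, old, val);
    if (found == old)
      break;       // our value was installed
    old = found;
  }
}
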
// BARRIER is a compiler-only fence: it keeps the compiler from moving
// memory accesses across it but emits no instructions.
static inline int BARRIER() {
  CFENCE;
  return 1;
}

// MBARRIER is a full hardware memory fence (mfence).
static inline int MBARRIER() {
  MFENCE;
  return 1;
}

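// Usage sketch (illustrative, not part of the original header): publishing a
// value through a flag. On x86, stores become visible in program order, so
// the compiler-only BARRIER suffices on the producer side; MBARRIER is what
// you need when a store must be ordered before a *later load* (Dekker-style
// mutual exclusion). The example_* names are hypothetical.
static volatile int example_payload;
static volatile int example_ready;

static inline void example_publish(int value) {
  example_payload = value;
  BARRIER();            // forbid the compiler from sinking the payload store
  example_ready = 1;    // consumers that see ready == 1 also see the payload
}
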
#endif // ____MLP_LOCK_H__