MIPS: New macro smp_mb__before_llsc.
Author:     David Daney <ddaney@caviumnetworks.com>
AuthorDate: Sat, 9 Jan 2010 01:17:43 +0000 (17:17 -0800)
Commit:     Ralf Baechle <ralf@linux-mips.org>
CommitDate: Sat, 27 Feb 2010 11:53:06 +0000 (12:53 +0100)
Replace some instances of smp_llsc_mb() with a new macro,
smp_mb__before_llsc(), which is used before ll/sc sequences that are
documented as needing write barrier semantics.
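
For orientation, the shape of such a sequence under the new convention is
sketched below.  This is an illustration only, not code from this patch:
example_add_return() is a made-up name, the plain beqz retry loop ignores
the R10000_LLSC_WAR branch-likely handling, and the usual kernel headers
(asm/barrier.h, asm/atomic.h) are assumed.

    /* Illustration only -- hypothetical, simplified LL/SC read-modify-write. */
    static inline int example_add_return(int i, atomic_t *v)
    {
            int result, temp;

            smp_mb__before_llsc();          /* was smp_llsc_mb() before this patch */

            __asm__ __volatile__(
            "       .set    push                            \n"
            "       .set    noreorder                       \n"
            "       .set    mips3                           \n"
            "1:     ll      %1, %2  # example_add_return    \n"
            "       addu    %0, %1, %3                      \n"
            "       sc      %0, %2                          \n"
            "       beqz    %0, 1b                          \n"
            "        nop                                    \n"
            "       .set    pop                             \n"
            : "=&r" (result), "=&r" (temp), "+m" (v->counter)
            : "r" (i)
            : "memory");

            result = temp + i;              /* value observed by ll, plus i */

            smp_llsc_mb();                  /* unchanged: barrier after the sc */

            return result;
    }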

The default implementation of smp_mb__before_llsc() is just smp_llsc_mb(),
so there are no changes in semantics.
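
Spelled out (this restates the asm/barrier.h hunk below rather than adding
anything new), the default is:

    /*
     * smp_llsc_mb() is a MIPS "sync" when CONFIG_WEAK_REORDERING_BEYOND_LLSC
     * and CONFIG_SMP are both set, and an empty asm with a "memory" clobber
     * (a compiler-only barrier) otherwise -- exactly as before.
     */
    #define smp_mb__before_llsc() smp_llsc_mb()

The point of the separate name is that a platform whose ll/sc sequences only
need write barrier ordering ahead of them could later supply a lighter
definition.  Purely as a hypothetical example, no such override being part of
this patch:

    #define smp_mb__before_llsc() smp_wmb()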

Also simplify the definitions of smp_mb(), smp_rmb(), and smp_wmb() to be
just barrier() in the non-SMP case.
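
This is equivalent because barrier() (from <linux/compiler.h>) is itself an
empty asm with a "memory" clobber, which is all the old #else definitions
amounted to; roughly:

    /* before: empty instruction string, still a compiler barrier */
    #define smp_mb()        __asm__ __volatile__("               \n" : : :"memory")

    /* after: the generic compiler barrier, same effect */
    #define smp_mb()        barrier()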

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/851/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/atomic.h
arch/mips/include/asm/barrier.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/system.h

diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index dd75d673447e37147c438991829b70c15c7c721b..519197ede0898f2ce7bd0a3019fd4ac1f541d057 100644
@@ -137,7 +137,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
        int result;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                int temp;
@@ -189,7 +189,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
        int result;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                int temp;
@@ -249,7 +249,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
        int result;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                int temp;
@@ -516,7 +516,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
        long result;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                long temp;
@@ -568,7 +568,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
        long result;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                long temp;
@@ -628,7 +628,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
        long result;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                long temp;
@@ -788,9 +788,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()    smp_llsc_mb()
+#define smp_mb__before_atomic_dec()    smp_mb__before_llsc()
 #define smp_mb__after_atomic_dec()     smp_llsc_mb()
-#define smp_mb__before_atomic_inc()    smp_llsc_mb()
+#define smp_mb__before_atomic_inc()    smp_mb__before_llsc()
 #define smp_mb__after_atomic_inc()     smp_llsc_mb()
 
 #include <asm-generic/atomic-long.h>
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 91785dc8e94e76d00a3f48c4110f81f64a9d47d1..1a5a51c3e96ffbebe193f538cc51dfb7d0d91fd7 100644
 #endif /* !CONFIG_CPU_HAS_WB */
 
 #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
-#define __WEAK_ORDERING_MB     "       sync    \n"
+#define smp_mb()       __asm__ __volatile__("sync" : : :"memory")
+#define smp_rmb()      __asm__ __volatile__("sync" : : :"memory")
+#define smp_wmb()      __asm__ __volatile__("sync" : : :"memory")
 #else
-#define __WEAK_ORDERING_MB     "               \n"
+#define smp_mb()       barrier()
+#define smp_rmb()      barrier()
+#define smp_wmb()      barrier()
 #endif
+
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
 #define __WEAK_LLSC_MB         "       sync    \n"
 #else
 #define __WEAK_LLSC_MB         "               \n"
 #endif
 
-#define smp_mb()       __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_rmb()      __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_wmb()      __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-
 #define set_mb(var, value) \
        do { var = value; smp_mb(); } while (0)
 
 #define smp_llsc_mb()  __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
+#define smp_mb__before_llsc() smp_llsc_mb()
+
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 84a383806b2cd823d9acbb362df004ae27c15854..9255cfbee4596b2281a470a740d8bb0d493d5700 100644
@@ -42,7 +42,7 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()     smp_llsc_mb()
+#define smp_mb__before_clear_bit()     smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()      smp_llsc_mb()
 
 /*
@@ -258,7 +258,7 @@ static inline int test_and_set_bit(unsigned long nr,
        unsigned short bit = nr & SZLONG_MASK;
        unsigned long res;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -395,7 +395,7 @@ static inline int test_and_clear_bit(unsigned long nr,
        unsigned short bit = nr & SZLONG_MASK;
        unsigned long res;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -485,7 +485,7 @@ static inline int test_and_change_bit(unsigned long nr,
        unsigned short bit = nr & SZLONG_MASK;
        unsigned long res;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 815a438a268da85962b1e0a59d6bc0b90e7eee9c..ed9aaaaf0749932495c93c11e1ff24838c66cdc6 100644
  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __cmpxchg(ptr, old, new, barrier)                              \
+#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier)            \
 ({                                                                     \
        __typeof__(ptr) __ptr = (ptr);                                  \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __typeof__(*(ptr)) __res = 0;                                   \
                                                                        \
-       barrier;                                                        \
+       pre_barrier;                                                    \
                                                                        \
        switch (sizeof(*(__ptr))) {                                     \
        case 4:                                                         \
@@ -96,13 +96,13 @@ extern void __cmpxchg_called_with_bad_pointer(void);
                break;                                                  \
        }                                                               \
                                                                        \
-       barrier;                                                        \
+       post_barrier;                                                   \
                                                                        \
        __res;                                                          \
 })
 
-#define cmpxchg(ptr, old, new)         __cmpxchg(ptr, old, new, smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new)   __cmpxchg(ptr, old, new, )
+#define cmpxchg(ptr, old, new)         __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
+#define cmpxchg_local(ptr, old, new)   __cmpxchg(ptr, old, new, , )
 
 #define cmpxchg64(ptr, o, n)                                           \
   ({                                                                   \
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 21ef9efbde43f9e61cf726961e81e91c7252d6b5..5f16696eaa003af15d4b5b9e679f882374eac80b 100644
@@ -138,7 +138,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        int tmp;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__ (
@@ -305,7 +305,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
 
-       smp_llsc_mb();
+       smp_mb__before_llsc();
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index 83b5509e09e8cf99a6bd966ee991ce0234806566..bb937ccfba1ea92507214309cf21d8d54d03fd6e 100644
@@ -95,6 +95,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
        __u32 retval;
 
+       smp_mb__before_llsc();
+
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;
 
@@ -147,6 +149,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 {
        __u64 retval;
 
+       smp_mb__before_llsc();
+
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;