Merge branch 'torvalds/master'
[firefly-linux-kernel-4.4.55.git] / arch / mn10300 / include / asm / atomic.h
index 975e1841ca64704b4862c6f3579bb40a3a7ab5c3..ce318d5ab23b06484e063d59e564bef6a2673700 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <asm/irqflags.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
@@ -32,9 +33,8 @@
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
 */
-#define atomic_read(v) (ACCESS_ONCE((v)->counter))
+#define atomic_read(v) READ_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
 */
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       int retval;
-#ifdef CONFIG_SMP
-       int status;
-
-       asm volatile(
-               "1:     mov     %4,(_AAR,%3)    \n"
-               "       mov     (_ADR,%3),%1    \n"
-               "       add     %5,%1           \n"
-               "       mov     %1,(_ADR,%3)    \n"
-               "       mov     (_ADR,%3),%0    \n"     /* flush */
-               "       mov     (_ASR,%3),%0    \n"
-               "       or      %0,%0           \n"
-               "       bne     1b              \n"
-               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
-               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
-               : "memory", "cc");
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
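For context on the two conversions above: for aligned scalar types, READ_ONCE()/WRITE_ONCE() reduce to volatile accesses, which keep the compiler from tearing, fusing, or re-reading the access. A minimal sketch of the int-sized case (the real <linux/compiler.h> macros also dispatch on size and add compile-time checks; the names below are illustrative, not the kernel's):

	/* Minimal sketch, not the kernel's actual macros. */
	#define READ_ONCE_SKETCH(x)	(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE_SKETCH(x, v)	(*(volatile typeof(x) *)&(x) = (v))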
+
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       int retval, status;                                             \
+                                                                       \
+       asm volatile(                                                   \
+               "1:     mov     %4,(_AAR,%3)    \n"                     \
+               "       mov     (_ADR,%3),%1    \n"                     \
+               "       " #op " %5,%1           \n"                     \
+               "       mov     %1,(_ADR,%3)    \n"                     \
+               "       mov     (_ADR,%3),%0    \n"     /* flush */     \
+               "       mov     (_ASR,%3),%0    \n"                     \
+               "       or      %0,%0           \n"                     \
+               "       bne     1b              \n"                     \
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
+               : "memory", "cc");                                      \
+}
 
-#else
-       unsigned long flags;
-
-       flags = arch_local_cli_save();
-       retval = v->counter;
-       retval += i;
-       v->counter = retval;
-       arch_local_irq_restore(flags);
-#endif
-       return retval;
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       int retval, status;                                             \
+                                                                       \
+       asm volatile(                                                   \
+               "1:     mov     %4,(_AAR,%3)    \n"                     \
+               "       mov     (_ADR,%3),%1    \n"                     \
+               "       " #op " %5,%1           \n"                     \
+               "       mov     %1,(_ADR,%3)    \n"                     \
+               "       mov     (_ADR,%3),%0    \n"     /* flush */     \
+               "       mov     (_ASR,%3),%0    \n"                     \
+               "       or      %0,%0           \n"                     \
+               "       bne     1b              \n"                     \
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
+               : "memory", "cc");                                      \
+       return retval;                                                  \
 }
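Both macros above spin on the MN10300 atomic-operations unit: the store to _AAR latches the target address, _ADR reads back the current value and accepts the updated one, and _ASR reads non-zero if the store was disturbed, in which case `bne 1b` retries. A C-level sketch of the same retry loop, with hypothetical ll()/sc() helpers standing in for the hardware sequence:

	/* Hypothetical helpers standing in for the _AAR/_ADR/_ASR sequence. */
	extern int ll(int *addr);		/* latch addr, return current value */
	extern int sc(int *addr, int val);	/* try store; 0 if disturbed */

	/* Roughly what ATOMIC_OP_RETURN(add) generates: */
	static inline int atomic_add_return_sketch(int i, atomic_t *v)
	{
		int new;

		do {
			new = ll(&v->counter) + i;
		} while (!sc(&v->counter, new));

		return new;
	}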
 
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       int retval;
-#ifdef CONFIG_SMP
-       int status;
-
-       asm volatile(
-               "1:     mov     %4,(_AAR,%3)    \n"
-               "       mov     (_ADR,%3),%1    \n"
-               "       sub     %5,%1           \n"
-               "       mov     %1,(_ADR,%3)    \n"
-               "       mov     (_ADR,%3),%0    \n"     /* flush */
-               "       mov     (_ASR,%3),%0    \n"
-               "       or      %0,%0           \n"
-               "       bne     1b              \n"
-               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
-               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
-               : "memory", "cc");
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-#else
-       unsigned long flags;
-       flags = arch_local_cli_save();
-       retval = v->counter;
-       retval -= i;
-       v->counter = retval;
-       arch_local_irq_restore(flags);
-#endif
-       return retval;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
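Spelled out, the two instantiations above generate four functions:

	static inline void atomic_add(int i, atomic_t *v);	/* ATOMIC_OP(add) */
	static inline int atomic_add_return(int i, atomic_t *v);	/* ATOMIC_OP_RETURN(add) */
	static inline void atomic_sub(int i, atomic_t *v);	/* ATOMIC_OP(sub) */
	static inline int atomic_sub_return(int i, atomic_t *v);	/* ATOMIC_OP_RETURN(sub) */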
 
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-       return atomic_add_return(i, v) < 0;
-}
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
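The bitwise ops are generated without _return variants. A quick usage sketch (the function name is illustrative; expected counter values shown in comments):

	static void bitwise_ops_example(void)
	{
		atomic_t v = ATOMIC_INIT(0x0ff0);

		atomic_and(0x00f0, &v);	/* v.counter == 0x00f0 */
		atomic_or(0x0f00, &v);	/* v.counter == 0x0ff0 */
		atomic_xor(0x0ff0, &v);	/* v.counter == 0x0000 */
	}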
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-       atomic_add_return(i, v);
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-static inline void atomic_sub(int i, atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
-       atomic_sub_return(i, v);
+       return atomic_add_return(i, v) < 0;
 }
 
 static inline void atomic_inc(atomic_t *v)
@@ -167,79 +131,6 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
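atomic_cmpxchg() is the building block for conditional read-modify-write loops. A sketch of the classic pattern, similar in spirit to the generic __atomic_add_unless() (the wrapper name here is hypothetical):

	static inline int add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		while (c != u) {
			int old = atomic_cmpxchg(v, c, c + a);

			if (old == c)
				break;	/* swap succeeded */
			c = old;	/* lost a race; retry with the fresh value */
		}
		return c;	/* old value; callers test c != u for success */
	}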
 
-/**
- * atomic_clear_mask - Atomically clear bits in memory
- * @mask: Mask of the bits to be cleared
- * @v: pointer to word in memory
- *
- * Atomically clears the bits set in mask from the memory word specified.
- */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-       int status;
-
-       asm volatile(
-               "1:     mov     %3,(_AAR,%2)    \n"
-               "       mov     (_ADR,%2),%0    \n"
-               "       and     %4,%0           \n"
-               "       mov     %0,(_ADR,%2)    \n"
-               "       mov     (_ADR,%2),%0    \n"     /* flush */
-               "       mov     (_ASR,%2),%0    \n"
-               "       or      %0,%0           \n"
-               "       bne     1b              \n"
-               : "=&r"(status), "=m"(*addr)
-               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-               : "memory", "cc");
-#else
-       unsigned long flags;
-
-       mask = ~mask;
-       flags = arch_local_cli_save();
-       *addr &= mask;
-       arch_local_irq_restore(flags);
-#endif
-}
-
-/**
- * atomic_set_mask - Atomically set bits in memory
- * @mask: Mask of the bits to be set
- * @v: pointer to word in memory
- *
- * Atomically sets the bits set in mask from the memory word specified.
- */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-       int status;
-
-       asm volatile(
-               "1:     mov     %3,(_AAR,%2)    \n"
-               "       mov     (_ADR,%2),%0    \n"
-               "       or      %4,%0           \n"
-               "       mov     %0,(_ADR,%2)    \n"
-               "       mov     (_ADR,%2),%0    \n"     /* flush */
-               "       mov     (_ASR,%2),%0    \n"
-               "       or      %0,%0           \n"
-               "       bne     1b              \n"
-               : "=&r"(status), "=m"(*addr)
-               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-               : "memory", "cc");
-#else
-       unsigned long flags;
-
-       flags = arch_local_cli_save();
-       *addr |= mask;
-       arch_local_irq_restore(flags);
-#endif
-}
-
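The deleted mask helpers are subsumed by the new atomic_and()/atomic_or(); assuming callers were converted along the lines of the tree-wide cleanup, the mapping is roughly as follows (wrapper names hypothetical; note the inverted mask):

	/* Hypothetical wrappers showing the rough replacement mapping: */
	static inline void clear_mask_sketch(int mask, atomic_t *v)
	{
		atomic_and(~mask, v);	/* was: atomic_clear_mask(mask, addr) */
	}

	static inline void set_mask_sketch(int mask, atomic_t *v)
	{
		atomic_or(mask, v);	/* was: atomic_set_mask(mask, addr) */
	}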
-/* Atomic operations are already serializing on MN10300??? */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec()     barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc()     barrier()
-
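These per-operation barrier macros were superseded by the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, which is presumably also why <asm/barrier.h> is now included at the top of the file. A usage sketch (the function name is illustrative):

	/* Illustrative only; smp_mb__{before,after}_atomic() are the generic API. */
	static inline void dec_and_order_sketch(atomic_t *v)
	{
		smp_mb__before_atomic();	/* order earlier accesses before the RMW */
		atomic_dec(v);
		smp_mb__after_atomic();		/* order the RMW before later accesses */
	}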
 #endif /* __KERNEL__ */
 #endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */