xtensa: Provide atomic_{or,xor,and}
author Peter Zijlstra <peterz@infradead.org>
Wed, 23 Apr 2014 17:35:00 +0000 (19:35 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Mon, 27 Jul 2015 12:06:23 +0000 (14:06 +0200)
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
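
For context, the ATOMIC_OP() macro in this header generates each operation
from a common template. On cores with S32C1I, ATOMIC_OP(or) expands to
essentially the same compare-and-swap loop that the open-coded
atomic_set_mask() removed below used. A sketch of the generated function,
modeled on that removed body (the macro template itself lives in an unshown
part of the header, so treat this as illustrative rather than verbatim):

    static inline void atomic_or(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            __asm__ __volatile__(
                    "1:     l32i    %1, %3, 0\n"     /* load current value   */
                    "       wsr     %1, scompare1\n" /* arm s32c1i compare   */
                    "       or      %0, %1, %2\n"    /* apply the logic op   */
                    "       s32c1i  %0, %3, 0\n"     /* store iff unchanged  */
                    "       bne     %0, %1, 1b\n"    /* retry on contention  */
                    : "=&a" (result), "=&a" (tmp)
                    : "a" (i), "a" (v)
                    : "memory"
                    );
    }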
arch/xtensa/include/asm/atomic.h

index 00b7d46b35b848226cacd90034bc241181d3389c..4dd2450300a673a2fd6547b5394b8b2d2ff30dcf 100644
@@ -145,10 +145,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v)               \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+       atomic_or(mask, v);
+}
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+       atomic_and(~mask, v);
+}
+
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
@@ -250,75 +266,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       and     %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (~mask), "a" (v)
-                       : "memory"
-                       );
-#else
-       unsigned int all_f = -1;
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       xor     %1, %4, %3\n"
-                       "       and     %0, %0, %4\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval), "=a" (mask)
-                       : "a" (v), "a" (all_f), "1" (mask)
-                       : "a15", "memory"
-                       );
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-       unsigned long tmp;
-       int result;
-
-       __asm__ __volatile__(
-                       "1:     l32i    %1, %3, 0\n"
-                       "       wsr     %1, scompare1\n"
-                       "       or      %0, %1, %2\n"
-                       "       s32c1i  %0, %3, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (mask), "a" (v)
-                       : "memory"
-                       );
-#else
-       unsigned int vval;
-
-       __asm__ __volatile__(
-                       "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                       "       l32i    %0, %2, 0\n"
-                       "       or      %0, %0, %1\n"
-                       "       s32i    %0, %2, 0\n"
-                       "       wsr     a15, ps\n"
-                       "       rsync\n"
-                       : "=&a" (vval)
-                       : "a" (mask), "a" (v)
-                       : "a15", "memory"
-                       );
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
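
For callers, the conversion that the deprecated wrappers above encode is
mechanical. A hedged before/after sketch (PENDING_MASK and irq_flags are
illustrative names, not taken from this patch):

    static atomic_t irq_flags = ATOMIC_INIT(0);
    #define PENDING_MASK 0x1

    /* before: arch-provided mask helpers */
    atomic_set_mask(PENDING_MASK, &irq_flags);
    atomic_clear_mask(PENDING_MASK, &irq_flags);

    /* after: generic atomic logic ops */
    atomic_or(PENDING_MASK, &irq_flags);
    atomic_and(~PENDING_MASK, &irq_flags);

Note that clearing inverts the mask: atomic_clear_mask(mask, v) becomes
atomic_and(~mask, v), exactly as the wrapper in this patch does.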