Disintegrate asm/system.h for Xtensa
author David Howells <dhowells@redhat.com>
Wed, 28 Mar 2012 17:30:03 +0000 (18:30 +0100)
committer David Howells <dhowells@redhat.com>
Wed, 28 Mar 2012 17:30:03 +0000 (18:30 +0100)
Disintegrate asm/system.h for Xtensa.
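
The barrier, cmpxchg/xchg, exec and switch_to pieces move out of the
catch-all <asm/system.h> into dedicated headers, and system.h itself is
reduced to a transitional stub that pulls the new headers back in.  As a
rough sketch of what a consumer looks like afterwards (the example file
is hypothetical, not part of this series):

    /* before: everything arrived via the catch-all header */
    #include <asm/system.h>

    /* after: include only what is actually used */
    #include <asm/barrier.h>    /* mb(), smp_mb(), set_mb(), ...      */
    #include <asm/cmpxchg.h>    /* cmpxchg(), xchg()                  */
    #include <asm/switch_to.h>  /* switch_to() for the scheduler core */
    #include <asm/exec.h>       /* arch_align_stack()                 */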

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Chris Zankel <chris@zankel.net>

15 files changed:
arch/xtensa/include/asm/atomic.h
arch/xtensa/include/asm/barrier.h [new file with mode: 0644]
arch/xtensa/include/asm/bitops.h
arch/xtensa/include/asm/cmpxchg.h [new file with mode: 0644]
arch/xtensa/include/asm/exec.h [new file with mode: 0644]
arch/xtensa/include/asm/setup.h
arch/xtensa/include/asm/switch_to.h [new file with mode: 0644]
arch/xtensa/include/asm/system.h
arch/xtensa/include/asm/uaccess.h
arch/xtensa/kernel/process.c
arch/xtensa/kernel/ptrace.c
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/traps.c
arch/xtensa/mm/fault.c
arch/xtensa/mm/tlb.c

index 23592eff67add99a13cdcf3f27dd1e8947c98e79..b4098930877587d7dec309bbc702a5ed674c94f6 100644 (file)
@@ -18,7 +18,7 @@
 
 #ifdef __KERNEL__
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..55707a8
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BARRIER_H
+#define _XTENSA_BARRIER_H
+
+#define smp_read_barrier_depends() do { } while(0)
+#define read_barrier_depends() do { } while(0)
+
+#define mb()  barrier()
+#define rmb() mb()
+#define wmb() mb()
+
+#ifdef CONFIG_SMP
+#error smp_* not defined
+#else
+#define smp_mb()       barrier()
+#define smp_rmb()      barrier()
+#define smp_wmb()      barrier()
+#endif
+
+#define set_mb(var, value)     do { var = value; mb(); } while (0)
+
+#endif /* _XTENSA_BARRIER_H */
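
A minimal, hypothetical illustration of the new header in use (the
demo_* names are invented; on !CONFIG_SMP Xtensa the smp_* calls
compile down to plain compiler barriers):

    #include <asm/barrier.h>

    static int demo_payload;
    static int demo_ready;

    /* Producer: make the payload visible before the ready flag. */
    static void demo_publish(int value)
    {
            demo_payload = value;
            smp_wmb();
            demo_ready = 1;
    }

    /* Consumer: pair the producer's write barrier with a read barrier. */
    static int demo_consume(int *value)
    {
            if (!demo_ready)
                    return 0;
            smp_rmb();
            *value = demo_payload;
            return 1;
    }
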
index 40aa7fe77f668464450126184297e22aac51410d..5270197ddd369bd76771ab1f05a2c5bbb24563cb 100644 (file)
@@ -21,7 +21,6 @@
 
 #include <asm/processor.h>
 #include <asm/byteorder.h>
-#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 # error SMP not supported on this architecture
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
new file mode 100644 (file)
index 0000000..e321490
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Atomic xchg and cmpxchg operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+/*
+ * cmpxchg
+ */
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+  __asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
+                      "l32i    %0, %1, 0              \n\t"
+                      "bne     %0, %2, 1f             \n\t"
+                      "s32i    %3, %1, 0              \n\t"
+                      "1:                             \n\t"
+                      "wsr     a15, "__stringify(PS)" \n\t"
+                      "rsync                          \n\t"
+                      : "=&a" (old)
+                      : "a" (p), "a" (old), "r" (new)
+                      : "a15", "memory");
+  return old;
+}
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+       switch (size) {
+       case 4:  return __cmpxchg_u32(ptr, old, new);
+       default: __cmpxchg_called_with_bad_pointer();
+                return old;
+       }
+}
+
+#define cmpxchg(ptr,o,n)                                                     \
+       ({ __typeof__(*(ptr)) _o_ = (o);                                      \
+          __typeof__(*(ptr)) _n_ = (n);                                      \
+          (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,          \
+                                       (unsigned long)_n_, sizeof (*(ptr))); \
+       })
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+                                     unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 4:
+               return __cmpxchg_u32(ptr, old, new);
+       default:
+               return __cmpxchg_local_generic(ptr, old, new, size);
+       }
+
+       return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)                                              \
+       ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+                       (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * xchg_u32
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+  unsigned long tmp;
+  __asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
+                      "l32i    %0, %1, 0              \n\t"
+                      "s32i    %2, %1, 0              \n\t"
+                      "wsr     a15, "__stringify(PS)" \n\t"
+                      "rsync                          \n\t"
+                      : "=&a" (tmp)
+                      : "a" (m), "a" (val)
+                      : "a15", "memory");
+  return tmp;
+}
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
+{
+       switch (size) {
+               case 4:
+                       return xchg_u32(ptr, x);
+       }
+       __xchg_called_with_bad_pointer();
+       return x;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_CMPXCHG_H */
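
The cmpxchg()/xchg() macros dispatch on sizeof(*(ptr)); only the 4-byte
case is implemented, so any other size is caught at link time by the
deliberately undefined __cmpxchg_called_with_bad_pointer() and
__xchg_called_with_bad_pointer().  A hypothetical caller (names invented
for illustration) might look like:

    #include <asm/cmpxchg.h>

    static int demo_owner;      /* 0 means the slot is free */

    /* Claim the slot for 'id' only if it is still free; nonzero on success. */
    static int demo_try_claim(int id)
    {
            return cmpxchg(&demo_owner, 0, id) == 0;
    }

    /* Unconditionally take over the slot, returning the previous owner. */
    static int demo_handover(int id)
    {
            return xchg(&demo_owner, id);
    }
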
diff --git a/arch/xtensa/include/asm/exec.h b/arch/xtensa/include/asm/exec.h
new file mode 100644 (file)
index 0000000..af949e2
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_EXEC_H
+#define _XTENSA_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _XTENSA_EXEC_H */
index e3636520d8cc2d534735cd42b35d513c420fc79f..9fa8ad9793611f9d1c2295849b90f0444a2a9835 100644 (file)
@@ -13,4 +13,6 @@
 
 #define COMMAND_LINE_SIZE      256
 
+extern void set_except_vector(int n, void *addr);
+
 #endif
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644 (file)
index 0000000..6b73bf0
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/* switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev,next,last)              \
+do {                                           \
+       (last) = _switch_to(prev, next);        \
+} while(0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
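
For context, the generic scheduler invokes this macro from its
context-switch path roughly as sketched below (a simplified,
hypothetical fragment, not code from this commit):

    #include <asm/switch_to.h>

    struct task_struct;         /* real definition lives in <linux/sched.h> */

    static struct task_struct *demo_context_switch(struct task_struct *prev,
                                                   struct task_struct *next)
    {
            struct task_struct *last;

            /* _switch_to() returns the task that was running before us. */
            switch_to(prev, next, last);
            return last;
    }
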
index 1e7e09ab6cd7836802b7442b5436e3a25a775640..a7f40578587c912c41cbe0eedfc9b8fbf0caaec0 100644 (file)
@@ -1,184 +1,5 @@
-/*
- * include/asm-xtensa/system.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SYSTEM_H
-#define _XTENSA_SYSTEM_H
-
-#include <linux/stringify.h>
-#include <linux/irqflags.h>
-
-#include <asm/processor.h>
-
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
-#define mb()  barrier()
-#define rmb() mb()
-#define wmb() mb()
-
-#ifdef CONFIG_SMP
-#error smp_* not defined
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#endif
-
-#define set_mb(var, value)     do { var = value; mb(); } while (0)
-
-#if !defined (__ASSEMBLY__)
-
-/* * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- */
-extern void *_switch_to(void *last, void *next);
-
-#endif /* __ASSEMBLY__ */
-
-#define switch_to(prev,next,last)              \
-do {                                           \
-       (last) = _switch_to(prev, next);        \
-} while(0)
-
-/*
- * cmpxchg
- */
-
-static inline unsigned long
-__cmpxchg_u32(volatile int *p, int old, int new)
-{
-  __asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-                      "l32i    %0, %1, 0              \n\t"
-                      "bne     %0, %2, 1f             \n\t"
-                      "s32i    %3, %1, 0              \n\t"
-                      "1:                             \n\t"
-                      "wsr     a15, "__stringify(PS)" \n\t"
-                      "rsync                          \n\t"
-                      : "=&a" (old)
-                      : "a" (p), "a" (old), "r" (new)
-                      : "a15", "memory");
-  return old;
-}
-/* This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg(). */
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-       switch (size) {
-       case 4:  return __cmpxchg_u32(ptr, old, new);
-       default: __cmpxchg_called_with_bad_pointer();
-                return old;
-       }
-}
-
-#define cmpxchg(ptr,o,n)                                                     \
-       ({ __typeof__(*(ptr)) _o_ = (o);                                      \
-          __typeof__(*(ptr)) _n_ = (n);                                      \
-          (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,          \
-                                       (unsigned long)_n_, sizeof (*(ptr))); \
-       })
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-                                     unsigned long old,
-                                     unsigned long new, int size)
-{
-       switch (size) {
-       case 4:
-               return __cmpxchg_u32(ptr, old, new);
-       default:
-               return __cmpxchg_local_generic(ptr, old, new, size);
-       }
-
-       return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)                                              \
-       ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-                       (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * xchg_u32
- *
- * Note that a15 is used here because the register allocation
- * done by the compiler is not guaranteed and a window overflow
- * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
- * where no register reference will cause an overflow.
- */
-
-static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
-{
-  unsigned long tmp;
-  __asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-                      "l32i    %0, %1, 0              \n\t"
-                      "s32i    %2, %1, 0              \n\t"
-                      "wsr     a15, "__stringify(PS)" \n\t"
-                      "rsync                          \n\t"
-                      : "=&a" (tmp)
-                      : "a" (m), "a" (val)
-                      : "a15", "memory");
-  return tmp;
-}
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-/*
- * This only works if the compiler isn't horribly bad at optimizing.
- * gcc-2.5.8 reportedly can't handle this, but I define that one to
- * be dead anyway.
- */
-
-extern void __xchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
-{
-       switch (size) {
-               case 4:
-                       return xchg_u32(ptr, x);
-       }
-       __xchg_called_with_bad_pointer();
-       return x;
-}
-
-extern void set_except_vector(int n, void *addr);
-
-static inline void spill_registers(void)
-{
-       unsigned int a0, ps;
-
-       __asm__ __volatile__ (
-               "movi   a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
-               "mov    a12, a0\n\t"
-               "rsr    a13," __stringify(SAR) "\n\t"
-               "xsr    a14," __stringify(PS) "\n\t"
-               "movi   a0, _spill_registers\n\t"
-               "rsync\n\t"
-               "callx0 a0\n\t"
-               "mov    a0, a12\n\t"
-               "wsr    a13," __stringify(SAR) "\n\t"
-               "wsr    a14," __stringify(PS) "\n\t"
-               :: "a" (&a0), "a" (&ps)
-               : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
-}
-
-#define arch_align_stack(x) (x)
-
-#endif /* _XTENSA_SYSTEM_H */
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>
index 3fa526fd3c9989a3b279e4720cbb03742739cb16..6e4bb3b791ab709c221840a5d6987f7067b268d1 100644 (file)
@@ -17,7 +17,9 @@
 #define _XTENSA_UACCESS_H
 
 #include <linux/errno.h>
+#ifndef __ASSEMBLY__
 #include <linux/prefetch.h>
+#endif
 #include <asm/types.h>
 
 #define VERIFY_READ    0
index 2c9004770c4e1d0ca3aa63a10128510bbccab659..6a2d6edf8f728af36bf4da4fc1587e79057bdf46 100644 (file)
@@ -34,7 +34,6 @@
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
-#include <asm/system.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/platform.h>
index 2dff698ab02e5ce1642248afa0f582b8aa0f112f..33eea4c16f121bde393508ed132bab7bd4d16d47 100644 (file)
@@ -24,7 +24,6 @@
 
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/elf.h>
index 1e5a034fe01194b200796ea5e2da9d8885eee4fb..17e746f7be604023856417a8d05dab6fcf3a4b37 100644 (file)
@@ -34,7 +34,6 @@
 # include <linux/seq_file.h>
 #endif
 
-#include <asm/system.h>
 #include <asm/bootparam.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
index e64efac3b9db43e7383127ec56ee31030ec36ff3..bc1e14cf93692b811c09c7440c83d9309cc00834 100644 (file)
@@ -381,6 +381,25 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task)
        return sp;
 }
 
+static inline void spill_registers(void)
+{
+       unsigned int a0, ps;
+
+       __asm__ __volatile__ (
+               "movi   a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
+               "mov    a12, a0\n\t"
+               "rsr    a13," __stringify(SAR) "\n\t"
+               "xsr    a14," __stringify(PS) "\n\t"
+               "movi   a0, _spill_registers\n\t"
+               "rsync\n\t"
+               "callx0 a0\n\t"
+               "mov    a0, a12\n\t"
+               "wsr    a13," __stringify(SAR) "\n\t"
+               "wsr    a14," __stringify(PS) "\n\t"
+               :: "a" (&a0), "a" (&ps)
+               : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
+}
+
 void show_trace(struct task_struct *task, unsigned long *sp)
 {
        unsigned long a0, a1, pc;
index e367e30264366d82c17dc66d157a2aa2534388bc..b17885a0b508fe82e84b012b89539caf3862dfed 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
 #include <asm/uaccess.h>
-#include <asm/system.h>
 #include <asm/pgalloc.h>
 
 unsigned long asid_cache = ASID_USER_FIRST;
index 239461d8ea888c150877a18d017a190ff6f61631..e2700b21395b16f2548da0fb809f0066a61d0bd7 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>