Merge branches 'fixes' and 'misc' into for-next
author Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 11 Apr 2014 13:50:05 +0000 (14:50 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 11 Apr 2014 13:50:05 +0000 (14:50 +0100)
arch/arm/include/asm/assembler.h
arch/arm/include/asm/cputype.h
arch/arm/kernel/crash_dump.c
arch/arm/kernel/entry-header.S
arch/arm/kernel/pj4-cp0.c
arch/arm/kernel/process.c
arch/arm/kernel/traps.c
arch/arm/mach-vexpress/dcscb.c
arch/arm/mm/dump.c
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S

index 380ac4f20000c8c33d1744da3898d71e3f4b7c68..b974184f9941883339c0480df1290a6a6f66a4ae 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/domain.h>
 #include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
 
 #define IOMEM(x)       (x)
 
        restore_irqs_notrace \oldcpsr
        .endm
 
+/*
+ * Get current thread_info.
+ */
+       .macro  get_thread_info, rd
+ ARM(  mov     \rd, sp, lsr #13        )
+ THUMB(        mov     \rd, sp                 )
+ THUMB(        lsr     \rd, \rd, #13           )
+       mov     \rd, \rd, lsl #13
+       .endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+       .macro  inc_preempt_count, ti, tmp
+       ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
+       add     \tmp, \tmp, #1                  @ increment it
+       str     \tmp, [\ti, #TI_PREEMPT]
+       .endm
+
+       .macro  dec_preempt_count, ti, tmp
+       ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
+       sub     \tmp, \tmp, #1                  @ decrement it
+       str     \tmp, [\ti, #TI_PREEMPT]
+       .endm
+
+       .macro  dec_preempt_count_ti, ti, tmp
+       get_thread_info \ti
+       dec_preempt_count \ti, \tmp
+       .endm
+#else
+       .macro  inc_preempt_count, ti, tmp
+       .endm
+
+       .macro  dec_preempt_count, ti, tmp
+       .endm
+
+       .macro  dec_preempt_count_ti, ti, tmp
+       .endm
+#endif
+
 #define USER(x...)                             \
 9999:  x;                                      \
        .pushsection __ex_table,"a";            \
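
The macros added above centralise logic that several VFP entry paths previously open-coded. As a rough C sketch of what they compute — assuming the 8 KiB (1 << 13 byte) kernel stacks implied by the #13 shifts; the struct and helper names below are illustrative stand-ins, not the kernel's actual current_thread_info() implementation:

    #include <stdint.h>

    #define THREAD_SIZE (1UL << 13)   /* 8 KiB stack, matching the lsr/lsl #13 */

    struct ti_sketch {
            int preempt_count;        /* stands in for the TI_PREEMPT slot */
    };

    /* get_thread_info: thread_info sits at the base of the kernel stack,
     * so clearing the low 13 bits of the stack pointer reaches it */
    static inline struct ti_sketch *ti_of(uintptr_t sp)
    {
            return (struct ti_sketch *)(sp & ~(THREAD_SIZE - 1));
    }

    /* inc_preempt_count / dec_preempt_count: a plain non-atomic
     * load, add/sub, store, mirroring the assembler macros */
    static inline void inc_preempt(struct ti_sketch *ti) { ti->preempt_count++; }
    static inline void dec_preempt(struct ti_sketch *ti) { ti->preempt_count--; }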
index 42f0889f058456be8e1c34705ae0e2aa12364baf..c651e3b26ec703b08cea0128eb76c23d9aa94d46 100644 (file)
@@ -221,4 +221,23 @@ static inline int cpu_is_xsc3(void)
 #define        cpu_is_xscale() 1
 #endif
 
+/*
+ * Marvell's PJ4 core is based on ARMv7, but it needs a modified
+ * coprocessor access setup. For this reason, we need a way to
+ * distinguish it.
+ */
+#ifndef CONFIG_CPU_PJ4
+#define cpu_is_pj4()   0
+#else
+static inline int cpu_is_pj4(void)
+{
+       unsigned int id;
+
+       id = read_cpuid_id();
+       if ((id & 0xfffffff0) == 0x562f5840)
+               return 1;
+
+       return 0;
+}
+#endif
 #endif
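
The 0xfffffff0 mask compares everything in the ID register except the low revision nibble. A standalone sketch of the decode, assuming the architected MIDR field layout (implementer [31:24], variant [23:20], architecture [19:16], part number [15:4], revision [3:0]):

    #include <stdio.h>

    int main(void)
    {
            unsigned int id = 0x562f5840;   /* the PJ4 value matched above */

            printf("implementer  0x%02x\n", (id >> 24) & 0xff); /* 0x56 = 'V' (Marvell) */
            printf("variant      0x%x\n",   (id >> 20) & 0xf);  /* 0x2 */
            printf("architecture 0x%x\n",   (id >> 16) & 0xf);  /* 0xf = CPUID scheme */
            printf("part number  0x%03x\n", (id >> 4) & 0xfff); /* 0x584 */
            /* the revision field (id & 0xf) is what the 0xfffffff0 mask ignores */
            return 0;
    }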
index 90c50d4b43f74089b8a7eae08753c41ec00b4461..5d1286d51154cdc09d269ee292a0541cde367a5b 100644 (file)
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!csize)
                return 0;
 
-       vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+       vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
        if (!vaddr)
                return -ENOMEM;
 
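This matters with LPAE, where phys_addr_t is 64-bit while unsigned long is still 32-bit, so the open-coded pfn << PAGE_SHIFT truncates any address at or above 4 GiB; __pfn_to_phys() widens to phys_addr_t before shifting. A host-side sketch of the truncation (a PAGE_SHIFT of 12 assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t pfn  = 0x100000;                    /* page frame at 4 GiB */
            uint32_t bad  = pfn << PAGE_SHIFT;           /* wraps to 0 in 32-bit arithmetic */
            uint64_t good = (uint64_t)pfn << PAGE_SHIFT; /* widen first: 0x100000000 */

            printf("bad=%#x good=%#llx\n", bad, (unsigned long long)good);
            return 0;
    }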
index 39f89fbd5111ee9f0a96d6712e1f0f2ba308147d..1420725142cab1817d26bc3ff7003ae4a4833732 100644 (file)
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
 
-       .macro  get_thread_info, rd
-       mov     \rd, sp, lsr #13
-       mov     \rd, \rd, lsl #13
-       .endm
-
        @
        @ 32-bit wide "mov pc, reg"
        @
        .endm
 #endif /* ifdef CONFIG_CPU_V7M / else */
 
-       .macro  get_thread_info, rd
-       mov     \rd, sp
-       lsr     \rd, \rd, #13
-       mov     \rd, \rd, lsl #13
-       .endm
-
        @
        @ 32-bit wide "mov pc, reg"
        @
index 679cf4d18c08bfa99fec75cadd24027c192a1706..fc72086362842436381d0595c1afea648eb7b830 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <asm/thread_notify.h>
+#include <asm/cputype.h>
 
 static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
 {
@@ -80,6 +81,9 @@ static int __init pj4_cp0_init(void)
 {
        u32 cp_access;
 
+       if (!cpu_is_pj4())
+               return 0;
+
        cp_access = pj4_cp_access_read() & ~0xf;
        pj4_cp_access_write(cp_access);
 
index 204f7d2733193282f7eadcb20c47c26ae9e451b9..639bf32689dddaa6f54d2b7adbc1fcf2f8101502 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
+#include <asm/system_misc.h>
 #include <asm/mach/time.h>
 #include <asm/tls.h>
 
@@ -100,7 +101,7 @@ void soft_restart(unsigned long addr)
        u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
 
        /* Disable interrupts first */
-       local_irq_disable();
+       raw_local_irq_disable();
        local_fiq_disable();
 
        /* Disable the L2 if we're the last man standing. */
index 172ee18ff1247b3159c355d899bf09bfc06579c8..abd2fc0677364a529d4c12479890f6b97770b41a 100644 (file)
@@ -445,6 +445,7 @@ die_sig:
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
+               __show_regs(regs);
                dump_instr(KERN_INFO, regs);
        }
 #endif
index 14d499688736b3c816c082f3216aa12503005b64..788495d35cf9ea6d920a69a8fd6cc2a7b46fd7c0 100644 (file)
@@ -137,11 +137,16 @@ static void dcscb_power_down(void)
                v7_exit_coherency_flush(all);
 
                /*
-                * This is a harmless no-op.  On platforms with a real
-                * outer cache this might either be needed or not,
-                * depending on where the outer cache sits.
+                * A full outer cache flush could be needed at this point
+                * on platforms with such a cache, depending on where the
+                * outer cache sits. In some cases the notion of a "last
+                * cluster standing" would need to be implemented if the
+                * outer cache is shared across clusters. In any case, when
+                * the outer cache needs flushing, there is no concurrent
+                * access to the cache controller to worry about and no
+                * special locking besides what is already provided by the
+                * MCPM state machinery is needed.
                 */
-               outer_flush_all();
 
                /*
                 * Disable cluster-level coherency by masking
index ef69152f9b52e473796829545bf5cf5d1e83926a..c508f41a43bcb9f2f97c9d4afaf131ef77b4dc44 100644 (file)
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
 };
 
 static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-       /* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+       {
+               .mask   = PMD_SECT_USER,
+               .val    = PMD_SECT_USER,
+               .set    = "USR",
+       }, {
+               .mask   = PMD_SECT_RDONLY,
+               .val    = PMD_SECT_RDONLY,
+               .set    = "ro",
+               .clear  = "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
        {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-               .val    = 0,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .set    = "    ro",
        }, {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val    = PMD_SECT_AP_WRITE,
                .set    = "    RW",
        }, {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val    = PMD_SECT_AP_READ,
                .set    = "USR ro",
        }, {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .set    = "USR RW",
-#else
+#else /* ARMv4/ARMv5 */
+       /* These are approximate */
        {
-               .mask   = PMD_SECT_USER,
-               .val    = PMD_SECT_USER,
-               .set    = "USR",
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = 0,
+               .set    = "    ro",
        }, {
-               .mask   = PMD_SECT_RDONLY,
-               .val    = PMD_SECT_RDONLY,
-               .set    = "ro",
-               .clear  = "RW",
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_AP_WRITE,
+               .set    = "    RW",
+       }, {
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_AP_READ,
+               .set    = "USR ro",
+       }, {
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .set    = "USR RW",
 #endif
        }, {
                .mask   = PMD_SECT_XN,
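
For ARMv6 and later, the rewritten table decodes all three access-permission bits together instead of approximating from AP alone. A self-contained sketch of the same mapping; the bit positions (AP_WRITE at bit 10, AP_READ at bit 11, APX at bit 15) are an assumption taken from the classic ARM short-descriptor section format:

    #include <stdio.h>

    #define AP_WRITE (1u << 10)   /* PMD_SECT_AP_WRITE */
    #define AP_READ  (1u << 11)   /* PMD_SECT_AP_READ  */
    #define APX      (1u << 15)   /* PMD_SECT_APX      */

    /* mirrors the ARMv6+ section_bits entries above */
    static const char *decode(unsigned int pmd)
    {
            switch (pmd & (APX | AP_READ | AP_WRITE)) {
            case APX | AP_WRITE:     return "    ro";   /* kernel read-only  */
            case AP_WRITE:           return "    RW";   /* kernel read-write */
            case AP_READ:            return "USR ro";   /* user read-only    */
            case AP_READ | AP_WRITE: return "USR RW";   /* user read-write   */
            }
            return "      ";                            /* no entry matches  */
    }

    int main(void)
    {
            printf("[%s]\n", decode(APX | AP_WRITE));   /* prints [    ro] */
            return 0;
    }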
index 46e17492fd1f3ecccda92c660474c805707b788b..f0759e70fb865b4d24370a46b58b37564e49f871 100644 (file)
@@ -8,9 +8,12 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 
 @ VFP entry point.
 @
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT_COUNT
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       add     r11, r4, #1             @ increment it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       inc_preempt_count r10, r4
        enable_irq
        ldr     r4, .LCvfp
        ldr     r11, [r10, #TI_CPU]     @ CPU number
@@ -35,12 +34,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        mov     pc, lr
 ENDPROC(vfp_null_entry)
 
@@ -53,12 +47,7 @@ ENDPROC(vfp_null_entry)
 
        __INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        ldr     r0, VFP_arch_address
        str     r0, [r0]                @ set to non-zero value
        mov     pc, r9                  @ we have handled the fault
index 3e5d3115a2a6847ee41fc3d6d23ffde8fa0a7917..be807625ed8c23dead72282fc9ff5b9babbfe1f6 100644 (file)
  * r10 points at the start of the private FP workspace in the thread structure
  * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include <linux/kern_levels.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 
        .macro  DBGSTR, str
 #ifdef DEBUG
@@ -179,12 +182,7 @@ vfp_hw_state_valid:
                                        @ else it's one 32-bit instruction, so
                                        @ always subtract 4 from the following
                                        @ instruction address.
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        mov     pc, r9                  @ we think we have handled things
 
 
@@ -203,12 +201,7 @@ look_for_VFP_exceptions:
        @ not recognised by VFP
 
        DBGSTR  "not VFP"
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        mov     pc, lr
 
 process_exception: