ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs
Author:     Catalin Marinas <catalin.marinas@arm.com>
AuthorDate: Mon, 28 Nov 2011 13:53:28 +0000 (13:53 +0000)
Commit:     Catalin Marinas <catalin.marinas@arm.com>
CommitDate: Tue, 17 Apr 2012 14:29:32 +0000 (15:29 +0100)
Since the ASIDs must be unique to an mm across all the CPUs in a system,
the __new_context() function needs to broadcast a context reset event to
all the CPUs during ASID allocation when a roll-over occurs. Such IPIs
cannot be issued with interrupts disabled, so ARM had to define
__ARCH_WANT_INTERRUPTS_ON_CTXSW.
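
As background, the roll-over referred to above happens when the global
ASID allocator wraps into a new generation: the bits of cpu_last_asid
above ASID_BITS act as a generation counter, and only a roll-over forces
the cross-CPU reset. A simplified, standalone sketch of that condition
(plain C stand-ins using the ASID_BITS value of 8 from
arch/arm/mm/context.c; not the kernel's actual __new_context()):

  #define ASID_BITS          8
  #define ASID_FIRST_VERSION (1u << ASID_BITS)

  static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

  /* Returns non-zero when allocating the next ASID crosses into a new
   * generation, i.e. the case where __new_context() must broadcast a
   * reset event to the other CPUs and therefore needs IRQs enabled. */
  static int asid_rollover(void)
  {
          unsigned int asid = ++cpu_last_asid;

          /* Low ASID_BITS all zero: the hardware ASID space wrapped. */
          return (asid & (ASID_FIRST_VERSION - 1u)) == 0;
  }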

This patch turns the check_context() function into
check_and_switch_context(), called from switch_mm(). On ASID-capable
CPUs (ARMv6 onwards), if a new ASID is needed while interrupts are
disabled, the __new_context() and cpu_switch_mm() calls are deferred to
the post-lock switch hook, where interrupts are enabled. Setting the
reserved TTBR0 was also moved from cpu_v7_switch_mm() to
check_and_switch_context().
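
The hook in question is finish_arch_post_lock_switch(), which the
scheduler core invokes once the runqueue lock has been dropped and
interrupts are enabled again. A simplified, standalone model of the
three cases and of where the deferred work runs (plain C stand-ins for
the kernel's types and helpers, not the actual implementation):

  #include <stdbool.h>
  #include <stdio.h>

  #define ASID_BITS 8

  struct mm_model { unsigned int context_id; };

  static unsigned int cpu_last_asid = 2u << ASID_BITS;
  static bool tif_switch_mm;    /* stand-in for TIF_SWITCH_MM */

  static void model_cpu_switch_mm(struct mm_model *mm)
  {
          printf("cpu_switch_mm(): ASID %u\n",
                 mm->context_id & ((1u << ASID_BITS) - 1u));
  }

  static void model_switch_new_context(struct mm_model *mm)
  {
          /* __new_context() may IPI on roll-over, so this path must
           * only run with interrupts enabled (allocation simplified
           * here to a bare increment). */
          mm->context_id = ++cpu_last_asid;
          model_cpu_switch_mm(mm);
  }

  static void model_check_and_switch_context(struct mm_model *mm,
                                             bool irqs_off)
  {
          /* The reserved TTBR0 would be installed here first, so no
           * speculative table walks use the old pgd in the meantime. */
          if (!((mm->context_id ^ cpu_last_asid) >> ASID_BITS))
                  model_cpu_switch_mm(mm);      /* same generation */
          else if (irqs_off)
                  tif_switch_mm = true;         /* defer the switch */
          else
                  model_switch_new_context(mm); /* direct caller */
  }

  static void model_finish_arch_post_lock_switch(struct mm_model *mm)
  {
          /* Runs after the runqueue lock is released, IRQs on again. */
          if (tif_switch_mm) {
                  tif_switch_mm = false;
                  model_switch_new_context(mm);
          }
  }

  int main(void)
  {
          /* An mm from an old generation, switched to under the
           * runqueue lock: the switch is deferred, then completed. */
          struct mm_model mm = { .context_id = 1u << ASID_BITS };

          model_check_and_switch_context(&mm, true);
          model_finish_arch_post_lock_switch(&mm);
          return 0;
  }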

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm/include/asm/mmu.h
arch/arm/include/asm/mmu_context.h
arch/arm/include/asm/thread_info.h
arch/arm/mm/context.c
arch/arm/mm/proc-v7-2level.S

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e49d748d12b1ac17fb6519e8d151b2..20b43d6f23b3a92c5a358241dd13281623fe2e88 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -39,6 +39,8 @@ typedef struct {
  * so enable interrupts over the context switch to avoid high
  * latency.
  */
+#ifndef CONFIG_CPU_HAS_ASID
 #define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c0a9949c30cc919adcf5e08fcf500..94e265cb5146ddc02569017e6c710f987d84a8f0 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -49,39 +49,80 @@ DECLARE_PER_CPU(struct mm_struct *, current_mm);
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-       /*
-        * This code is executed with interrupts enabled. Therefore,
-        * mm->context.id cannot be updated to the latest ASID version
-        * on a different CPU (and condition below not triggered)
-        * without first getting an IPI to reset the context. The
-        * alternative is to take a read_lock on mm->context.id_lock
-        * (after changing its type to rwlock_t).
-        */
-       if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-               __new_context(mm);
+       unsigned long flags;
 
+       __new_context(mm);
+
+       local_irq_save(flags);
+       cpu_switch_mm(mm->pgd, mm);
+       local_irq_restore(flags);
+}
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+                                           struct task_struct *tsk)
+{
        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                __check_kvm_seq(mm);
+
+       /*
+        * Required during context switch to avoid speculative page table
+        * walking with the wrong TTBR.
+        */
+       cpu_set_reserved_ttbr0();
+
+       if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+               /*
+                * The ASID is from the current generation, just switch to the
+                * new pgd. This condition is only true for calls from
+                * context_switch() and interrupts are already disabled.
+                */
+               cpu_switch_mm(mm->pgd, mm);
+       else if (irqs_disabled())
+               /*
+                * Defer the new ASID allocation until after the context
+                * switch critical region since __new_context() cannot be
+                * called with interrupts disabled (it sends IPIs).
+                */
+               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+       else
+               /*
+                * That is a direct call to switch_mm() or activate_mm() with
+                * interrupts enabled and a new context.
+                */
+               switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)       (__init_new_context(tsk,mm),0)
 
-#else
+#define finish_arch_post_lock_switch \
+       finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+       if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+               switch_new_context(current->mm);
+}
 
-static inline void check_context(struct mm_struct *mm)
+#else  /* !CONFIG_CPU_HAS_ASID */
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+                                           struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                __check_kvm_seq(mm);
+       cpu_switch_mm(mm->pgd, mm);
 #endif
 }
 
 #define init_new_context(tsk,mm)       0
 
-#endif
+#define finish_arch_post_lock_switch() do { } while (0)
+
+#endif /* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)            do { } while(0)
 
@@ -123,8 +164,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
                *crt_mm = next;
 #endif
-               check_context(next);
-               cpu_switch_mm(next->pgd, next);
+               check_and_switch_context(next, tsk);
                if (cache_is_vivt())
                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        }
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8ddbdaba9f11a6217cd6a8b592c22c..9e13e33ec746656a5e08c6fbda98a5ecf56aee0c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -146,6 +146,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20
 #define TIF_SECCOMP            21
+#define TIF_SWITCH_MM          22      /* deferred switch_mm */
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index aaa291fc072ed27ad9055cc096e4a60a4c772b4f..06a2e7ce23c34bfd69cd110b0757d1dc4d47571e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -23,7 +23,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
 #ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
+void cpu_set_reserved_ttbr0(void)
 {
        unsigned long ttbl = __pa(swapper_pg_dir);
        unsigned long ttbh = 0;
@@ -39,7 +39,7 @@ static void cpu_set_reserved_ttbr0(void)
        isb();
 }
 #else
-static void cpu_set_reserved_ttbr0(void)
+void cpu_set_reserved_ttbr0(void)
 {
        u32 ttb;
        /* Copy TTBR1 into TTBR0 */
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 72270482a9223d231105e7a4d43d5694dcdd21e2..42ac069c8012bfad83345f62fa814de139614178 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -46,9 +46,6 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
        mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
 #endif
-       mrc     p15, 0, r2, c2, c0, 1           @ load TTB 1
-       mcr     p15, 0, r2, c2, c0, 0           @ into TTB 0
-       isb
 #ifdef CONFIG_ARM_ERRATA_754322
        dsb
 #endif