rk: revert to v3.10
[firefly-linux-kernel-4.4.55.git]
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e0b10f19d679d5915de6a49a23e9fc141e68d527..a7b85e0d0cc154a90a2efadca763cc60d95e82d8 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,15 +27,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
 
-#ifdef CONFIG_ARM_ERRATA_798181
-void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-                            cpumask_t *mask);
-#else  /* !CONFIG_ARM_ERRATA_798181 */
-static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-                                          cpumask_t *mask)
-{
-}
-#endif /* CONFIG_ARM_ERRATA_798181 */
+DECLARE_PER_CPU(atomic64_t, active_asids);
 
 #else  /* !CONFIG_CPU_HAS_ASID */
 
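The hunk above restores the v3.10 ASID bookkeeping: the a15_erratum_get_cpumask() interface introduced for Cortex-A15 erratum 798181 is dropped, and the header once again exports the per-CPU active_asids counter maintained by the ASID allocator in arch/arm/mm/context.c. As a minimal sketch of how such a DECLARE_PER_CPU(atomic64_t, ...) counter is defined and read (read_active_asid() is a made-up helper for illustration; this is not the allocator's actual logic):

#include <linux/atomic.h>
#include <linux/percpu.h>

/* matching non-static definition, normally in arch/arm/mm/context.c */
DEFINE_PER_CPU(atomic64_t, active_asids);

static u64 read_active_asid(int cpu)
{
	/* 64-bit generation+ASID value for @cpu, read without tearing */
	return atomic64_read(&per_cpu(active_asids, cpu));
}

Exposing the raw per-CPU variable is what the v3.10-era TLB-broadcast workaround read directly, which is why the declaration returns here.
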
@@ -55,7 +47,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
                 * on non-ASID CPUs, the old mm will remain valid until the
                 * finish_arch_post_lock_switch() call.
                 */
-               mm->context.switch_pending = 1;
+               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
        else
                cpu_switch_mm(mm->pgd, mm);
 }
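With the flag-based deferral restored, the !CONFIG_CPU_HAS_ASID version of this function reads roughly as follows (reconstructed from the context lines above and the v3.10 source; the in-line comment is abbreviated):

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * Defer the switch: cpu_switch_mm() flushes the VIVT
		 * cache, which is too slow to run with IRQs off. Only UP
		 * is supported on non-ASID CPUs, so the old mm remains
		 * valid until finish_arch_post_lock_switch() runs.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		cpu_switch_mm(mm->pgd, mm);
}
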
@@ -64,21 +56,9 @@ static inline void check_and_switch_context(struct mm_struct *mm,
        finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-       struct mm_struct *mm = current->mm;
-
-       if (mm && mm->context.switch_pending) {
-               /*
-                * Preemption must be disabled during cpu_switch_mm() as we
-                * have some stateful cache flush implementations. Check
-                * switch_pending again in case we were preempted and the
-                * switch to this mm was already done.
-                */
-               preempt_disable();
-               if (mm->context.switch_pending) {
-                       mm->context.switch_pending = 0;
-                       cpu_switch_mm(mm->pgd, mm);
-               }
-               preempt_enable_no_resched();
+       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+               struct mm_struct *mm = current->mm;
+               cpu_switch_mm(mm->pgd, mm);
        }
 }
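
Note that the restored code depends on TIF_SWITCH_MM, which this header does not define; the companion v3.10 definition lives in arch/arm/include/asm/thread_info.h (bit number as it was in v3.10, and may differ in other trees):

#define TIF_SWITCH_MM		22	/* deferred switch_mm */

The two halves pair up as follows: set_ti_thread_flag() marks the incoming task's thread_info while interrupts are still off, and test_and_clear_thread_flag() picks the flag up on current once the scheduler calls finish_arch_post_lock_switch() with interrupts re-enabled, so the deferred cpu_switch_mm() runs in a context where the VIVT cache flush is safe.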