void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
-#ifdef CONFIG_ARM_ERRATA_798181
-void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-			     cpumask_t *mask);
-#else /* !CONFIG_ARM_ERRATA_798181 */
-static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
-					   cpumask_t *mask)
-{
-}
-#endif /* CONFIG_ARM_ERRATA_798181 */
+DECLARE_PER_CPU(atomic64_t, active_asids);

#else	/* !CONFIG_CPU_HAS_ASID */
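Not part of the patch, for orientation: a sketch of what the re-exported per-CPU counter pairs with, assuming the mainline layout where the definition lives in arch/arm/mm/context.c. The cross-CPU reader cpu_running_mm() below is hypothetical.

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/percpu.h>

/* Definition side, in arch/arm/mm/context.c. */
DEFINE_PER_CPU(atomic64_t, active_asids);

/*
 * Hypothetical reader: both counters hold a generation|ASID pair, so
 * equality means this CPU last switched to mm's current ASID.
 */
static bool cpu_running_mm(int cpu, struct mm_struct *mm)
{
	return atomic64_read(&per_cpu(active_asids, cpu)) ==
	       atomic64_read(&mm->context.id);
}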
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
-		mm->context.switch_pending = 1;
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		cpu_switch_mm(mm->pgd, mm);
}
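The new branch relies on a TIF_SWITCH_MM flag being defined for ARM. A minimal sketch of the arch/arm/include/asm/thread_info.h side this assumes; the bit number is illustrative only and must not collide with the existing TIF_* bits:

#define TIF_SWITCH_MM		22	/* deferred switch_mm; bit chosen for illustration */
#define _TIF_SWITCH_MM		(1 << TIF_SWITCH_MM)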
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
-	struct mm_struct *mm = current->mm;
-
-	if (mm && mm->context.switch_pending) {
-		/*
-		 * Preemption must be disabled during cpu_switch_mm() as we
-		 * have some stateful cache flush implementations. Check
-		 * switch_pending again in case we were preempted and the
-		 * switch to this mm was already done.
-		 */
-		preempt_disable();
-		if (mm->context.switch_pending) {
-			mm->context.switch_pending = 0;
-			cpu_switch_mm(mm->pgd, mm);
-		}
-		preempt_enable_no_resched();
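+	/* Pending state is now a per-thread flag; cleared atomically below. */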
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+
+		cpu_switch_mm(mm->pgd, mm);
	}
}
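Design note: the preempt_disable()/re-check pair in the old body guarded mm->context.switch_pending, which was shared, per-mm state. With a per-thread flag the clear is a single atomic read-modify-write on current's flag word: if this path is preempted, check_and_switch_context() sets the bit again on the way back in, and the next finish_arch_post_lock_switch() call repeats cpu_switch_mm(). A sketch paraphrasing the generic primitive from linux/thread_info.h, which the kernel expresses via test_and_clear_ti_thread_flag():

#include <linux/bitops.h>
#include <linux/thread_info.h>

/* Sketch only: equivalent to test_and_clear_thread_flag(flag). */
static inline int test_and_clear_thread_flag_sketch(int flag)
{
	return test_and_clear_bit(flag,
			(unsigned long *)&current_thread_info()->flags);
}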