/*
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
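
/*
 * Initialize the architecture-specific part of a new mm: reset the
 * attach bookkeeping, build the ASCE (address space control element)
 * bits and set up an empty top-level page table.
 */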
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}
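
/* No architecture-specific context teardown is required. */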
#define destroy_context(mm)             do { } while (0)
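
/*
 * Publish the user ASCE in lowcore and, when the task's mm_segment
 * selects user space, load it into control register 7 (the secondary
 * ASCE used by uaccess).  CIF_ASCE defers the primary ASCE update to
 * the exit-to-user path.
 */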
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE);
}
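
/*
 * Detach the user address space: point both the primary (cr1) and
 * secondary (cr7) ASCE at the kernel page tables.
 */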
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}
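
/*
 * Make sure control register 1 holds the kernel ASCE; if a user ASCE
 * was active, switch back and flag the CPU so the user ASCE is
 * restored on return to user space.
 */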
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);
}
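
/*
 * Switch address spaces: publish the next mm's ASCE in lowcore, run
 * on the kernel ASCE while the switch is in flight, and maintain the
 * attach count and CPU attach mask that the TLB flush code relies on.
 * The user ASCE is re-established once the switch has completed.
 */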
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
	if (prev == next)
		return;
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	atomic_inc(&next->context.attach_count);
	atomic_dec(&prev->context.attach_count);
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
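
/*
 * Runs after the scheduler has dropped the runqueue locks: wait while
 * the upper half of the attach count is non-zero (the TLB flush code
 * raises it to hold off attachers), attach this CPU to the mm, run a
 * deferred flush if one was requested, then restore the task's
 * address space mode.
 */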
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		while (atomic_read(&mm->context.attach_count) >> 16)
			cpu_relax();
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}
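
/* Lazy TLB mode and mm deactivation need no extra work on s390. */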
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)
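
/*
 * Used on the execve path: do the full mm switch and load the user
 * ASCE right away.
 */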
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}
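
/*
 * On fork, if the parent ran with a reduced address space limit,
 * shrink the child's region table to the same limit.
 */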
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (oldmm->context.asce_limit < mm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}
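
/* The remaining mmap hooks have no architecture-specific work to do. */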
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#endif /* __S390_MMU_CONTEXT_H */