/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
do {									\
	void (*tlbmiss_handler_setup_pgd)(unsigned long);		\
	extern u32 tlbmiss_handler_setup_pgd_array[16];			\
									\
	tlbmiss_handler_setup_pgd =					\
		(__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
} while (0)

#define TLBMISS_HANDLER_SETUP()						\
	do {								\
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
		write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
	} while (0)
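
/*
 * Note on the above (illustrative; the real stub is emitted by the
 * TLB-handler generator at boot): tlbmiss_handler_setup_pgd_array
 * holds a small run of generated machine code, so casting it to a
 * function pointer and calling it stores the new pgd where the
 * generated TLB refill handler expects to find it. Writing the CPU
 * number into the upper bits of c0_xcontext plays the same role as
 * stuffing it into c0_context in the pgd_current[] variant below.
 */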
#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the processor id is
 * stuffed into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	pgd_current[smp_processor_id()] = (unsigned long)(pgd)
#ifdef CONFIG_32BIT
#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() << 25);	\
	back_to_back_c0_hazard();					\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#ifdef CONFIG_64BIT
#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() << 26);	\
	back_to_back_c0_hazard();					\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
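
/*
 * Illustrative sketch of the consumer (the real refill handler is
 * generated assembly; roughly):
 *
 *	cpu_offset = read_c0_context() >> 23;
 *	pgd = *(unsigned long *)((char *)pgd_current + cpu_offset);
 *
 * A fixed right shift of 23 turns (cpu << 25) into cpu * 4 and
 * (cpu << 26) into cpu * 8, i.e. a ready-made byte offset into
 * pgd_current[] for 32-bit and 64-bit pointers respectively, which
 * is why the two TLBMISS_HANDLER_SETUP() variants above differ.
 */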
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC	0x10
#define ASID_MASK	0xff0

#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC	0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK	(smtc_asid_mask)
#define HW_ASID_MASK	0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#endif
#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
 * All upper bits unused by the hardware are treated as a software
 * ASID extension (a generation counter).
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
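
/*
 * Worked example (assuming the common 8-bit hardware ASID, i.e.
 * ASID_MASK == 0xff): ASID_MASK | (ASID_MASK - 1) == 0xff, so
 * ASID_VERSION_MASK == ~0xffUL and ASID_FIRST_VERSION == 0x100.
 * asid_cache(cpu) then counts 0x100, 0x101, ..., 0x1ff, 0x200, ...;
 * the low 8 bits go into EntryHi via cpu_asid(), while the upper
 * bits form the generation used to detect stale contexts.
 */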
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	extern void kvm_local_flush_tlb_all(void);
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
#ifdef CONFIG_VIRTUALIZATION
		kvm_local_flush_tlb_all();	/* start new asid cycle */
#else
		local_flush_tlb_all();		/* start new asid cycle */
#endif
		if (!asid)			/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
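
/*
 * Worked trace (8-bit ASID case): if asid_cache(cpu) is 0x1ff, the
 * increment yields 0x200, whose low ASID bits are zero, so the TLB
 * is flushed and 0x200 starts a new generation; every mm still
 * carrying a 0x1xx context now fails the ASID_VERSION_MASK check in
 * switch_mm() below and gets a fresh ASID on its next activation.
 * Only on a full wrap of the counter to 0 is the version forced
 * back to ASID_FIRST_VERSION.
 */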

#else /* CONFIG_MIPS_MT_SMTC */

#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))

#endif /* CONFIG_MIPS_MT_SMTC */
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		cpu_context(i, mm) = 0;

	return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
	local_irq_save(flags);
	mtflags = dvpe();
#else /* Not SMTC */
	local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */
	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the EntryHi ASID being replaced happens to be
	 * the value flagged at ASID recycling time as having
	 * an extended life, clear the bit showing it being
	 * in use by this "CPU", and if that's the last bit,
	 * free up the ASID value for use and flush any old
	 * instances of it from the TLB.
	 */
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/*
	 * Tread softly on EntryHi, and so long as we support
	 * having ASID_MASK smaller than the hardware maximum,
	 * make sure no "soft" bits become "hard"...
	 */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define deactivate_mm(tsk, mm)	do { } while (0)
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	/* Unconditionally get a new ASID. */
	get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
	oldasid = read_c0_entryhi() & ASID_MASK;
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}
/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	/* Can't use spinlock because called from TLB flush within DVPE */
	unsigned int prevvpe;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
		/* See comments for similar code above */
		prevvpe = dvpe();
		oldasid = (read_c0_entryhi() & ASID_MASK);
		if (smtc_live_asid[mytlb][oldasid]) {
			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
			if (smtc_live_asid[mytlb][oldasid] == 0)
				smtc_flush_tlb_asid(oldasid);
		}
		/* See comments for similar code above */
		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
				| cpu_asid(cpu, mm));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
		write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
	} else {
		/* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
		cpu_context(cpu, mm) = 0;
#else /* SMTC */
		int i;

		/* SMTC shares the TLB (and ASIDs) across VPEs */
		for_each_online_cpu(i) {
			if ((smtc_status & SMTC_TLB_SHARED) ||
			    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
				cpu_context(i, mm) = 0;
		}
#endif /* CONFIG_MIPS_MT_SMTC */
	}
	local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */