/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>
/* sparc64 has nothing to do when entering lazy TLB mode, so this is an
 * empty stub.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
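/* Context allocation in outline (see get_new_mmu_context() for the
 * authoritative version): hardware context numbers are handed out from
 * mmu_context_bmap under ctx_alloc_lock, and the upper bits of
 * tlb_context_cache hold a generation ("version") number.  When the
 * bitmap fills up, the version is bumped, implicitly invalidating every
 * context allocated under the old version; CTX_VALID() detects this by
 * comparing an mm's saved version against tlb_context_cache.
 */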
extern void __tsb_context_switch(unsigned long pgd_pa,
				 unsigned long tsb_reg,
				 unsigned long tsb_vaddr,
				 unsigned long tsb_pte,
				 unsigned long tsb_descr_pa);

static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
			     mm->context.tsb_map_vaddr,
			     mm->context.tsb_map_pte,
			     __pa(&mm->context.tsb_descr));
}
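/* The TSB (Translation Storage Buffer) is a per-mm, software-managed
 * cache of translations which the TLB-miss handlers probe before
 * falling back to a page-table walk.  tsb_context_switch() hands the
 * low-level helper everything needed to point the trap handlers at the
 * new mm: the physical address of the PGD, the TSB register value, the
 * virtual address and PTE mapping the TSB itself, and the physical
 * address of the TSB descriptor used on sun4v hypervisor machines.
 */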
extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
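/* tsb_grow() resizes an mm's TSB upward as its resident set grows,
 * keeping the hash reasonably sparse; mm_rss is the page count used to
 * choose the new size, gfp_flags the allocation context.
 */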
#ifdef CONFIG_SMP
extern void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif
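/* After a TSB resize, other cpus running this mm must reload their TSB
 * base pointers; smp_tsb_sync() cross-calls to make that happen.  On UP
 * there are no other cpus, so it compiles away to nothing.
 */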
/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
extern void __flush_tlb_mm(unsigned long, unsigned long);
/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid;
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);
	if (!ctx_valid || (old_mm != mm)) {
		load_secondary_context(mm);
		tsb_context_switch(mm);
	}

	/* Even if (mm == old_mm) we _must_ check
	 * the cpu_vm_mask.  If we do not we could
	 * corrupt the TLB state because of how
	 * smp_flush_tlb_{page,range,mm} on sparc64
	 * and lazy tlb switches work. -DaveM
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
}
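/* Note that ctx_valid is tested again outside the lock: if a fresh
 * context had to be allocated above, the secondary context register and
 * this cpu's TLB must be updated even when old_mm == mm, since any
 * translations tagged with the old context number are now stale.
 */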
#define deactivate_mm(tsk,mm)	do { } while (0)
/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
}
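/* activate_mm() is the exec()-time counterpart of switch_mm(): the new
 * mm has never run on this cpu, so the secondary context load, the TLB
 * flush and the TSB switch are all done unconditionally.
 */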
#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */