#ifndef _MOTOROLA_PGTABLE_H
#define _MOTOROLA_PGTABLE_H

/*
 * Definitions for MMU descriptors
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_SHORT	0x002
#define _PAGE_RONLY	0x004
#define _PAGE_ACCESSED	0x008
#define _PAGE_DIRTY	0x010
#define _PAGE_SUPER	0x080	/* 68040 supervisor only */
#define _PAGE_GLOBAL040	0x400	/* 68040 global bit, used for kva descs */
#define _PAGE_NOCACHE030	0x040	/* 68030 no-cache mode */
#define _PAGE_NOCACHE	0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S	0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040	0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W	0x000	/* 68040 cache mode, cachable, write-through */
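
/*
 * On the '040/'060 the cache mode occupies bits 5-6 of the descriptor,
 * i.e. the field cleared by _CACHEMASK040 below. For example
 * (illustrative only), switching an existing descriptor to serialized
 * no-cache mode would be:
 *
 *	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | _PAGE_NOCACHE_S;
 *
 * pte_mknocache()/pte_mkcache() below perform exactly this kind of
 * update, using the boot-selected cache modes.
 */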

#define _DESCTYPE_MASK	0x003

#define _CACHEMASK040	(~0x060)
#define _TABLE_MASK	(0xfffffe00)

#define _PAGE_TABLE	(_PAGE_SHORT)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)

#define _PAGE_PROTNONE	0x004
#define _PAGE_FILE	0x008	/* pagecache or swap? */
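
/*
 * Note that _PAGE_PROTNONE and _PAGE_FILE reuse the _PAGE_RONLY and
 * _PAGE_ACCESSED bit positions (0x004 and 0x008). They carry these
 * software meanings only while the pte is not present; e.g. a PROT_NONE
 * mapping is encoded as a non-present pte with bit 0x004 set, which is
 * why pte_present() below also accepts _PAGE_PROTNONE.
 */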

#ifndef __ASSEMBLY__

/* This is the cache mode to be used for pages containing page descriptors
 * for processors >= '040. It is used in pte_mknocache(), and the variable
 * is defined and initialized in head.S */
extern int m68k_pgtable_cachemode;

/* This is the cache mode for normal pages, for supervisor access on
 * processors >= '040. It is used in pte_mkcache(), and the variable is
 * defined and initialized in head.S */

#if defined(CPU_M68060_ONLY) && defined(CONFIG_060_WRITETHROUGH)
#define m68k_supervisor_cachemode _PAGE_CACHE040W
#elif defined(CPU_M68040_OR_M68060_ONLY)
#define m68k_supervisor_cachemode _PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define m68k_supervisor_cachemode 0
#else
extern int m68k_supervisor_cachemode;
#endif

#if defined(CPU_M68040_OR_M68060_ONLY)
#define mm_cachebits _PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define mm_cachebits 0
#else
extern unsigned long mm_cachebits;
#endif

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)

/* Alternate definitions that are compile time constants, for
 * initializing protection_map. The cachebits are fixed later. */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

/*
 * The m68k can't do page protection for execute, and treats it the same
 * as read. Also, write permissions imply read permissions. This is the
 * closest we can get..
 */

#define __P000	PAGE_NONE_C
#define __P001	PAGE_READONLY_C
#define __P010	PAGE_COPY_C
#define __P011	PAGE_COPY_C
#define __P100	PAGE_READONLY_C
#define __P101	PAGE_READONLY_C
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE_C
#define __S001	PAGE_READONLY_C
#define __S010	PAGE_SHARED_C
#define __S011	PAGE_SHARED_C
#define __S100	PAGE_READONLY_C
#define __S101	PAGE_READONLY_C
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C
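
/*
 * The three digits are the mmap read/write/execute permission bits.
 * For example, every writable private mapping (__P010, __P011, __P110,
 * __P111) resolves to PAGE_COPY_C so that the first write faults and
 * triggers copy-on-write, while the shared equivalents (__S010 etc.)
 * resolve to PAGE_SHARED_C and are directly writable.
 */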

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
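
/*
 * _PAGE_CHG_MASK preserves the page frame number plus the accessed,
 * dirty and cache-mode bits, so something like (illustrative only):
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * swaps the protection bits without losing referenced/modified state.
 */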

static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
	unsigned long *ptr = pmdp->pmd;
	short i = 16;

	/* Point each of the 16 pointer-table descriptors in this pmd
	 * at its 1/16th slice of the pte table. */
	while (--i >= 0) {
		*ptr++ = ptbl;
		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
	}
}
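
/*
 * Note the 16-way replication: the hardware pointer tables are smaller
 * than the generic pmd, so one pmd entry is backed by 16 consecutive
 * descriptors, each covering sizeof(pte_t)*PTRS_PER_PTE/16 bytes of the
 * pte table. pmd_clear() below zeroes the same 16 slots.
 */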

static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
}

#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep)	({ pte_val(*(ptep)) = 0; })

#define pte_page(pte)		virt_to_page(__va(pte_val(pte)))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
#define pmd_clear(pmdp) ({			\
	unsigned long *__ptr = pmdp->pmd;	\
	short __i = 16;				\
	while (--__i >= 0)			\
		*__ptr++ = 0;			\
})
#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_TABLE)
#define pgd_clear(pgdp)		({ pgd_val(*pgdp) = 0; })
#define pgd_page(pgd)		(mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return !(pte_val(pte) & _PAGE_RONLY); }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_RONLY; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RONLY; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
	return pte;
}
static inline pte_t pte_mkcache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
	return pte;
}
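
/*
 * Both helpers rewrite only the '040 cache-mode field (bits 5-6) and
 * leave every other descriptor bit alone. Which concrete mode they
 * install depends on the boot-time CPU detection that initializes
 * m68k_pgtable_cachemode and m68k_supervisor_cachemode in head.S; see
 * nocache_page()/cache_page() below for callers that use them to flip
 * the caching of page-table pages.
 */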

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];

static inline pgd_t *pgd_offset_k(unsigned long address)
{
	return kernel_pg_dir + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
{
	return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
#define pte_unmap(pte)		((void)0)
#define pte_unmap_nested(pte)	((void)0)

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		/* Walk the kernel page tables down to the pte for this
		 * page and switch it to no-cache mode. */
		dir = pgd_offset_k(addr);
		pmdp = pmd_offset(dir, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		pmdp = pmd_offset(dir, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mkcache(*ptep);
	}
}
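
/*
 * Typical use (illustrative only): pages holding page tables must run
 * no-cache on the '040/'060, so an allocator of pte pages would flush
 * the page from the cache and ATC (see the comment above) and then do
 *
 *	nocache_page(ptable);
 *
 * calling cache_page(ptable) again before the page goes back to the
 * free pool. Here "ptable" is just the kernel virtual address of the
 * page-table page.
 */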

#define PTE_FILE_MAX_BITS	28

static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> 4;
}

static inline pte_t pgoff_to_pte(unsigned off)
{
	pte_t pte = { (off << 4) + _PAGE_FILE };
	return pte;
}
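
/*
 * The two helpers are inverses: the file offset sits above the low four
 * descriptor bits, with _PAGE_FILE (and a clear _PAGE_PRESENT) marking
 * the pte as a non-linear file pte. With a 32-bit pte this leaves
 * 32 - 4 = 28 usable offset bits, matching PTE_FILE_MAX_BITS above.
 */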

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define __swp_type(x)		(((x).val >> 4) & 0xff)
#define __swp_offset(x)		((x).val >> 12)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 12) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
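
/*
 * Swap-entry layout in a non-present pte, from the macros above:
 *
 *	bits  0-3:  low descriptor bits, left clear (so !pte_present())
 *	bits  4-11: swap type (8 bits)
 *	bits 12-31: swap offset
 *
 * e.g. __swp_entry(1, 2) encodes to 0x2010.
 */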

#endif /* !__ASSEMBLY__ */
#endif /* _MOTOROLA_PGTABLE_H */