/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

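/*
 * TCR_EL1.T0SZ value for the identity map; the boot code may widen the
 * range if the kernel's physical placement needs more VA bits than VA_BITS.
 */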
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

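/*
 * Allocate a zeroed page-table page directly from memblock, for use
 * before the core page allocator is up.
 */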
static void __init *early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, PAGE_SIZE);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return ptr;
}

/*
 * Remap a PMD block mapping into individual pages.
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

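/*
 * Populate PTE-level mappings for [addr, end). If the PMD currently holds
 * a section mapping, it is first split into individual pages via
 * split_pmd() so the rest of the range stays mapped.
 */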
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   void *(*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = pgtable_alloc();
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

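/*
 * Remap a PUD block mapping as a table of PMD block mappings. The original
 * attributes are recovered by XORing the output address back out of the
 * PUD value.
 */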
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

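/*
 * Populate PMD-level mappings for [addr, end), using section mappings
 * where address and size alignment allow and falling back to PTEs
 * otherwise.
 */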
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = pgtable_alloc();
		if (pud_sect(*pud)) {
			/*
			 * The existing 1G of mappings must remain valid
			 * while they are being split.
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;

			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));

					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

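/*
 * A 1GB block mapping may only be used with a 4K granule (PAGE_SHIFT == 12)
 * and when the virtual range and the physical address are all aligned to
 * PUD_SIZE.
 */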
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

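/*
 * Populate PUD-level mappings for [addr, end), using 1GB block mappings
 * where possible and recursing into alloc_init_pmd() otherwise.
 */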
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = pgtable_alloc();
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;

			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));

					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by the phys/virt/size/prot arguments.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

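/*
 * Page-table allocator for use once the core allocators are up.
 * PGALLOC_GFP includes __GFP_ZERO, so the page is returned already zeroed.
 */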
static void *late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);

	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return ptr;
}

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
			 size, prot, early_pgtable_alloc);
}

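/*
 * Create a mapping in an arbitrary set of page tables (e.g. a private mm
 * such as the one used for the EFI runtime services), allocating any
 * intermediate tables from the page allocator.
 */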
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_pgtable_alloc);
}

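/*
 * Variant of create_mapping() for use after boot, when memblock
 * allocations are no longer appropriate.
 */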
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_mapping(&init_mm, pgd_offset_k(virt),
			 phys, virt, size, prot, late_pgtable_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				       kernel_x_start - start,
				       PAGE_KERNEL);
		create_mapping(kernel_x_start,
			       __phys_to_virt(kernel_x_start),
			       kernel_x_end - kernel_x_start,
			       PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				       __phys_to_virt(kernel_x_end),
				       end - kernel_x_end,
				       PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
		       PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps) of
	 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
	 * per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank align the start address and
			 * current memblock limit to prevent create_mapping() from
			 * allocating pte page tables from unmapped memory. With
			 * the section maps, if the first block doesn't end on section
			 * size boundary, create_mapping() will try to allocate a pte
			 * page, which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table for the
			 * current limit is already present in swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

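/*
 * With DEBUG_RODATA, trim the block-granular executable mapping around the
 * kernel back to page granularity: the parts of the first and last blocks
 * that fall outside _stext/__init_end are remapped non-executable.
 */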
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
			       __pa(_stext) - aligned_start,
			       PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SWAPPER_BLOCK_SIZE);

		create_mapping(__pa(__init_end), (unsigned long)__init_end,
			       aligned_end - __pa(__init_end),
			       PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}
#endif

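/*
 * Make the init region non-executable before it is freed back to the
 * page allocator.
 */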
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

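/*
 * The vmemmap is populated with base pages when the swapper does not use
 * section maps; otherwise PMD-sized blocks are used to save page-table
 * memory.
 */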
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = vmemmap_alloc_block_buf(PMD_SIZE, node);

			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

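/*
 * Statically allocated page tables backing the fixmap region. Levels that
 * are folded away by the configured CONFIG_PGTABLE_LEVELS are not needed
 * and hence not defined.
 */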
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

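/*
 * Install a fixmap entry for slot 'idx', or tear it down (including the
 * stale TLB entry) when the protection bits are zero.
 */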
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

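/*
 * Map the FDT into the fixmap, read-only. Returns the virtual address of
 * the FDT header, or NULL if the FDT is missing, misaligned or invalid.
 */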
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we rely on MIN_FDT_ALIGN being at
	 * least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double-check that here.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}