/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

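/*
 * Temporary pgd: kasan_init() briefly switches TTBR1 to a copy of
 * swapper_pg_dir kept here, so that instrumented code still has a live
 * shadow while the real shadow mappings are installed; see kasan_init().
 */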
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

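/*
 * The kasan_early_*_populate() helpers walk the shadow range one page
 * table level at a time and point every PTE at the single
 * kasan_zero_page, giving the whole range a minimal shared shadow until
 * kasan_init() installs the real one. Each loop stops early once it
 * reaches an entry that is already populated.
 */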
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);

	pmd = pmd_offset_kimg(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);

	pud = pud_offset_kimg(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

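/*
 * Cover the whole [KASAN_SHADOW_START, KASAN_SHADOW_END) range with the
 * zero-page shadow built by the helpers above.
 */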
static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

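/*
 * Called from head.S early in boot (hence asmlinkage). A quick sanity
 * check on the shadow layout: with one shadow byte per 8 bytes of
 * address space, the shadow of the full 64-bit VA range is 1 << 61
 * bytes, so shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET ends at
 * KASAN_SHADOW_END only if KASAN_SHADOW_OFFSET equals
 * KASAN_SHADOW_END - (1UL << 61); the first BUILD_BUG_ON() enforces this.
 */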
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here because it is a
	 * no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

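/*
 * Full shadow setup: allocate real shadow for the kernel image and for
 * all memory known to memblock, and back the never-poisoned parts of the
 * shadow region with the read-only zero page. The module area's shadow
 * is left unmapped here and is allocated when modules are loaded.
 */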
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we should unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code cannot execute without shadow
	 * memory, so tmp_pg_dir is used to keep the early shadow mapped
	 * until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);	/* ensure the copy is visible before the TTBR1 switch */
	cpu_replace_ttbr1(tmp_pg_dir);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end, NUMA_NO_NODE);

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the end address up to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the PMD block mappings
	 * with PMD table mappings at the edge of the shadow region for the
	 * kernel image.
	 */
	if (ARM64_SWAPPER_USES_SECTION_MAPS)
		kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   kasan_mem_to_shadow((void *)MODULES_VADDR));
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

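	/* Allocate real shadow for every memblock memory region. */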
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes
		 * in advance to slightly speed up the fast path. In some rare
		 * cases we could cross the boundary of mapped shadow, so we
		 * just map some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

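	/*
	 * The early shadow mapped this page writable, so instrumented code
	 * may have written poison values into it; scrub it so that it reads
	 * as zero ("unpoisoned") everywhere it is mapped.
	 */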
	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}