arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

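/*
 * Allocate a zeroed page for a page table from memblock. This runs before
 * the linear map is complete, so the new page is zeroed through the fixmap
 * rather than through its linear alias.
 */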
static phys_addr_t __init early_pgtable_alloc(void)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        BUG_ON(!phys);

        /*
         * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
         * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
         * any level of table.
         */
        ptr = pte_set_fixmap(phys);

        memset(ptr, 0, PAGE_SIZE);

        /*
         * Implicit barriers also ensure the zeroed page is visible to the
         * page table walker.
         */
        pte_clear_fixmap();

        return phys;
}

/*
 * Remap a PMD-level section into a table of pages.
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * We need the least restrictive permissions available here;
                 * the permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}

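/*
 * Create PTE-level mappings for [addr, end). If the PMD entry is empty or
 * a section mapping, a new PTE table is installed first; an existing
 * section is split via split_pmd() so the range stays mapped throughout.
 */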
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
                                  phys_addr_t (*pgtable_alloc)(void))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                phys_addr_t pte_phys = pgtable_alloc();
                pte = pte_set_fixmap(pte_phys);
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
                flush_tlb_all();
                pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_set_fixmap_offset(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);

        pte_clear_fixmap();
}

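/*
 * Remap a PUD-level section into a table of PMD-level sections. The
 * original attributes are recovered by XORing the output address back out
 * of the old PUD value.
 */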
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}

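/*
 * Create PMD-level mappings for [addr, end), using section mappings where
 * the virtual and physical addresses are suitably aligned and falling back
 * to alloc_init_pte() otherwise.
 */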
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  phys_addr_t (*pgtable_alloc)(void))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                phys_addr_t pmd_phys = pgtable_alloc();
                pmd = pmd_set_fixmap(pmd_phys);
                if (pud_sect(*pud)) {
                        /*
                         * The 1G of mappings must remain present
                         * while the section is being split.
                         */
                        split_pud(pud, pmd);
                }
                __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
                flush_tlb_all();
                pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_set_fixmap_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = pmd_page_paddr(old_pmd);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);

        pmd_clear_fixmap();
}

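/*
 * A 1GB block can only be used with the 4K granule (PAGE_SHIFT == 12), and
 * only when the virtual range and the physical address are all PUD-aligned.
 */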
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

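/*
 * Create PUD-level mappings for [addr, end), putting down a 1GB block
 * where use_1G_block() allows it and recursing into alloc_init_pmd()
 * otherwise.
 */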
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  phys_addr_t (*pgtable_alloc)(void))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                phys_addr_t pud_phys = pgtable_alloc();
                __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_set_fixmap_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block.
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = pud_page_paddr(old_pud);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(pud, addr, next, phys, prot,
                                       pgtable_alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);

        pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping from virtual address 'virt' to physical address 'phys', of the
 * given size and protection.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
                                    phys_addr_t (*pgtable_alloc)(void))
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

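/*
 * Allocate a page table page from the page allocator once it is available;
 * the DSB makes the zeroed page visible to the hardware page table walker
 * before the page is installed.
 */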
static phys_addr_t late_pgtable_alloc(void)
{
        void *ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return __pa(ptr);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
                                 phys_addr_t (*alloc)(void))
{
        init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

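/*
 * Boot-time mapping into the kernel page tables; intermediate tables are
 * allocated from memblock via early_pgtable_alloc().
 */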
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
                             early_pgtable_alloc);
}

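/*
 * Install a mapping in a caller-supplied mm's page tables; intermediate
 * tables come from the page allocator.
 */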
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
                             late_pgtable_alloc);
}

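/*
 * Like create_mapping(), but usable after paging_init(): intermediate
 * tables are allocated from the page allocator rather than memblock.
 */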
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
                             late_pgtable_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine-grained later once all memory
         * is mapped.
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                kernel_x_start - start,
                                PAGE_KERNEL);
                create_mapping(kernel_x_start,
                                __phys_to_virt(kernel_x_start),
                                kernel_x_end - kernel_x_start,
                                PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                __phys_to_virt(kernel_x_end),
                                end - kernel_x_end,
                                PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                        PAGE_KERNEL_EXEC);
}
#endif

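/*
 * Map all memblock memory regions into the linear mapping, with
 * __map_memblock() giving the region covering the kernel text execute
 * permission.
 */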
static void __init map_mem(void)
{
        struct memblock_region *reg;

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

                __map_memblock(start, end);
        }
}

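/*
 * With CONFIG_DEBUG_RODATA, __map_memblock() mapped the executable region
 * at SWAPPER_BLOCK_SIZE granularity; remap any misaligned head and tail of
 * that region back to non-executable PAGE_KERNEL.
 */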
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine-grained */
        if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SWAPPER_BLOCK_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                          SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
        }
#endif
}

#ifdef CONFIG_DEBUG_RODATA
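/*
 * Remap the kernel text (_stext.._etext) read-only and executable.
 */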
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
                                PAGE_KERNEL_ROX);
}
#endif

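/*
 * Strip execute permission from the init region once it is no longer
 * needed.
 */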
void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                        (unsigned long)__init_end - (unsigned long)__init_begin,
                        PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables and initialises the zone memory
 * maps.
 */
void __init paging_init(void)
{
        map_mem();
        fixup_executable();

        bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
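/*
 * With section maps, back the vmemmap with PMD-sized blocks obtained from
 * vmemmap_alloc_block_buf(), avoiding the PTE level entirely.
 */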
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

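/*
 * The fixmap page tables (bm_pud/bm_pmd/bm_pte above) are statically
 * allocated and wired up by early_fixmap_init(); the helpers below walk
 * down to the entry covering a given fixmap address.
 */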
static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}

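/*
 * Wire the statically allocated bm_* tables into the kernel page tables so
 * that the fixmap can be used before the page allocator is available.
 */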
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

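/*
 * Install or clear a single fixmap entry; clearing also invalidates the
 * TLB for the affected page.
 */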
void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}

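/*
 * Map the flattened device tree through the fixmap so it can be parsed
 * before the linear mapping exists. Returns the virtual address of the
 * FDT, or NULL if the physical address or the blob itself is unusable.
 */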
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL_RO;
        int size, offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. We rely on MIN_FDT_ALIGN being at least
         * 8 bytes so that we can always access the magic and size fields
         * of the FDT header after mapping the first chunk; double-check
         * here that this is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                       SWAPPER_BLOCK_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        size = fdt_totalsize(dt_virt);
        if (size > MAX_FDT_SIZE)
                return NULL;

        if (offset + size > SWAPPER_BLOCK_SIZE)
                create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

        memblock_reserve(dt_phys, size);

        return dt_virt;
}