1 /*
2  *  linux/arch/arm/mm/mmu.c
3  *
4  *  Copyright (C) 1995-2005 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/mman.h>
15 #include <linux/nodemask.h>
16 #include <linux/memblock.h>
17 #include <linux/fs.h>
18 #include <linux/vmalloc.h>
19 #include <linux/sizes.h>
20
21 #include <asm/cp15.h>
22 #include <asm/cputype.h>
23 #include <asm/sections.h>
24 #include <asm/cachetype.h>
25 #include <asm/setup.h>
26 #include <asm/smp_plat.h>
27 #include <asm/tlb.h>
28 #include <asm/highmem.h>
29 #include <asm/system_info.h>
30 #include <asm/traps.h>
31
32 #include <asm/mach/arch.h>
33 #include <asm/mach/map.h>
34 #include <asm/mach/pci.h>
35
36 #include "mm.h"
37 #include "tcm.h"
38
39 /*
40  * empty_zero_page is a special page that is used for
41  * zero-initialized data and COW.
42  */
43 struct page *empty_zero_page;
44 EXPORT_SYMBOL(empty_zero_page);
45
46 /*
47  * The pmd table for the upper-most set of pages.
48  */
49 pmd_t *top_pmd;
50
51 #define CPOLICY_UNCACHED        0
52 #define CPOLICY_BUFFERED        1
53 #define CPOLICY_WRITETHROUGH    2
54 #define CPOLICY_WRITEBACK       3
55 #define CPOLICY_WRITEALLOC      4
56
57 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
58 static unsigned int ecc_mask __initdata = 0;
59 pgprot_t pgprot_user;
60 pgprot_t pgprot_kernel;
61 pgprot_t pgprot_hyp_device;
62 pgprot_t pgprot_s2;
63 pgprot_t pgprot_s2_device;
64
65 EXPORT_SYMBOL(pgprot_user);
66 EXPORT_SYMBOL(pgprot_kernel);
67
68 struct cachepolicy {
69         const char      policy[16];
70         unsigned int    cr_mask;
71         pmdval_t        pmd;
72         pteval_t        pte;
73         pteval_t        pte_s2;
74 };
75
76 #ifdef CONFIG_ARM_LPAE
77 #define s2_policy(policy)       policy
78 #else
79 #define s2_policy(policy)       0
80 #endif
81
82 static struct cachepolicy cache_policies[] __initdata = {
83         {
84                 .policy         = "uncached",
85                 .cr_mask        = CR_W|CR_C,
86                 .pmd            = PMD_SECT_UNCACHED,
87                 .pte            = L_PTE_MT_UNCACHED,
88                 .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
89         }, {
90                 .policy         = "buffered",
91                 .cr_mask        = CR_C,
92                 .pmd            = PMD_SECT_BUFFERED,
93                 .pte            = L_PTE_MT_BUFFERABLE,
94                 .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
95         }, {
96                 .policy         = "writethrough",
97                 .cr_mask        = 0,
98                 .pmd            = PMD_SECT_WT,
99                 .pte            = L_PTE_MT_WRITETHROUGH,
100                 .pte_s2         = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
101         }, {
102                 .policy         = "writeback",
103                 .cr_mask        = 0,
104                 .pmd            = PMD_SECT_WB,
105                 .pte            = L_PTE_MT_WRITEBACK,
106                 .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
107         }, {
108                 .policy         = "writealloc",
109                 .cr_mask        = 0,
110                 .pmd            = PMD_SECT_WBWA,
111                 .pte            = L_PTE_MT_WRITEALLOC,
112                 .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
113         }
114 };
115
116 #ifdef CONFIG_CPU_CP15
117 /*
118  * These are useful for identifying cache coherency
119  * problems by allowing the cache, or the cache and
120  * write buffer, to be turned off.  (Note: the write
121  * buffer should not be on while the cache is off).
122  */
123 static int __init early_cachepolicy(char *p)
124 {
125         int i;
126
127         for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
128                 int len = strlen(cache_policies[i].policy);
129
130                 if (memcmp(p, cache_policies[i].policy, len) == 0) {
131                         cachepolicy = i;
132                         cr_alignment &= ~cache_policies[i].cr_mask;
133                         cr_no_alignment &= ~cache_policies[i].cr_mask;
134                         break;
135                 }
136         }
137         if (i == ARRAY_SIZE(cache_policies))
138                 printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
139         /*
140          * This restriction is partly to do with the way we boot; it is
141          * unpredictable to have memory mapped using two different sets of
142          * memory attributes (shared, type, and cache attribs).  We cannot
143          * change these attributes once the initial assembly has set up the
144          * page tables.
145          */
146         if (cpu_architecture() >= CPU_ARCH_ARMv6) {
147                 printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
148                 cachepolicy = CPOLICY_WRITEBACK;
149         }
150         flush_cache_all();
151         set_cr(cr_alignment);
152         return 0;
153 }
154 early_param("cachepolicy", early_cachepolicy);
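/*
 * Illustrative usage (not from the original source): booting with
 * "cachepolicy=writethrough" selects CPOLICY_WRITETHROUGH on pre-ARMv6
 * CPUs; on ARMv6 and later the request is overridden and writeback is
 * kept, as the warning in early_cachepolicy() above explains.
 */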
155
156 static int __init early_nocache(char *__unused)
157 {
158         char *p = "buffered";
159         printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
160         early_cachepolicy(p);
161         return 0;
162 }
163 early_param("nocache", early_nocache);
164
165 static int __init early_nowrite(char *__unused)
166 {
167         char *p = "uncached";
168         printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
169         early_cachepolicy(p);
170         return 0;
171 }
172 early_param("nowb", early_nowrite);
173
174 #ifndef CONFIG_ARM_LPAE
175 static int __init early_ecc(char *p)
176 {
177         if (memcmp(p, "on", 2) == 0)
178                 ecc_mask = PMD_PROTECTION;
179         else if (memcmp(p, "off", 3) == 0)
180                 ecc_mask = 0;
181         return 0;
182 }
183 early_param("ecc", early_ecc);
184 #endif
185
186 static int __init noalign_setup(char *__unused)
187 {
188         cr_alignment &= ~CR_A;
189         cr_no_alignment &= ~CR_A;
190         set_cr(cr_alignment);
191         return 1;
192 }
193 __setup("noalign", noalign_setup);
194
195 #ifndef CONFIG_SMP
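/*
 * adjust_cr() updates selected bits of the CP15 control register, together
 * with the cached cr_alignment/cr_no_alignment copies, with interrupts
 * disabled.  The alignment bit is masked out of @mask: CR_A is managed
 * separately (see noalign_setup() above).
 */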
196 void adjust_cr(unsigned long mask, unsigned long set)
197 {
198         unsigned long flags;
199
200         mask &= ~CR_A;
201
202         set &= mask;
203
204         local_irq_save(flags);
205
206         cr_no_alignment = (cr_no_alignment & ~mask) | set;
207         cr_alignment = (cr_alignment & ~mask) | set;
208
209         set_cr((get_cr() & ~mask) | set);
210
211         local_irq_restore(flags);
212 }
213 #endif
214
215 #else /* ifdef CONFIG_CPU_CP15 */
216
217 static int __init early_cachepolicy(char *p)
218 {
219         pr_warning("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
220 }
221 early_param("cachepolicy", early_cachepolicy);
222
223 static int __init noalign_setup(char *__unused)
224 {
225         pr_warning("noalign kernel parameter not supported without cp15\n");
	return 1;
226 }
227 __setup("noalign", noalign_setup);
228
229 #endif /* ifdef CONFIG_CPU_CP15 / else */
230
231 #define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
232 #define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE
233
234 static struct mem_type mem_types[] = {
235         [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
236                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
237                                   L_PTE_SHARED,
238                 .prot_l1        = PMD_TYPE_TABLE,
239                 .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
240                 .domain         = DOMAIN_IO,
241         },
242         [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
243                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
244                 .prot_l1        = PMD_TYPE_TABLE,
245                 .prot_sect      = PROT_SECT_DEVICE,
246                 .domain         = DOMAIN_IO,
247         },
248         [MT_DEVICE_CACHED] = {    /* ioremap_cached */
249                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
250                 .prot_l1        = PMD_TYPE_TABLE,
251                 .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
252                 .domain         = DOMAIN_IO,
253         },
254         [MT_DEVICE_WC] = {      /* ioremap_wc */
255                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
256                 .prot_l1        = PMD_TYPE_TABLE,
257                 .prot_sect      = PROT_SECT_DEVICE,
258                 .domain         = DOMAIN_IO,
259         },
260         [MT_UNCACHED] = {
261                 .prot_pte       = PROT_PTE_DEVICE,
262                 .prot_l1        = PMD_TYPE_TABLE,
263                 .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
264                 .domain         = DOMAIN_IO,
265         },
266         [MT_CACHECLEAN] = {
267                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
268                 .domain    = DOMAIN_KERNEL,
269         },
270 #ifndef CONFIG_ARM_LPAE
271         [MT_MINICLEAN] = {
272                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
273                 .domain    = DOMAIN_KERNEL,
274         },
275 #endif
276         [MT_LOW_VECTORS] = {
277                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
278                                 L_PTE_RDONLY,
279                 .prot_l1   = PMD_TYPE_TABLE,
280                 .domain    = DOMAIN_USER,
281         },
282         [MT_HIGH_VECTORS] = {
283                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
284                                 L_PTE_USER | L_PTE_RDONLY,
285                 .prot_l1   = PMD_TYPE_TABLE,
286                 .domain    = DOMAIN_USER,
287         },
288         [MT_MEMORY] = {
289                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
290                 .prot_l1   = PMD_TYPE_TABLE,
291                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
292                 .domain    = DOMAIN_KERNEL,
293         },
294         [MT_ROM] = {
295                 .prot_sect = PMD_TYPE_SECT,
296                 .domain    = DOMAIN_KERNEL,
297         },
298         [MT_MEMORY_NONCACHED] = {
299                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
300                                 L_PTE_MT_BUFFERABLE,
301                 .prot_l1   = PMD_TYPE_TABLE,
302                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
303                 .domain    = DOMAIN_KERNEL,
304         },
305         [MT_MEMORY_DTCM] = {
306                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
307                                 L_PTE_XN,
308                 .prot_l1   = PMD_TYPE_TABLE,
309                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
310                 .domain    = DOMAIN_KERNEL,
311         },
312         [MT_MEMORY_ITCM] = {
313                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
314                 .prot_l1   = PMD_TYPE_TABLE,
315                 .domain    = DOMAIN_KERNEL,
316         },
317         [MT_MEMORY_SO] = {
318                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
319                                 L_PTE_MT_UNCACHED | L_PTE_XN,
320                 .prot_l1   = PMD_TYPE_TABLE,
321                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
322                                 PMD_SECT_UNCACHED | PMD_SECT_XN,
323                 .domain    = DOMAIN_KERNEL,
324         },
325         [MT_MEMORY_DMA_READY] = {
326                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
327                 .prot_l1   = PMD_TYPE_TABLE,
328                 .domain    = DOMAIN_KERNEL,
329         },
330 };
331
332 const struct mem_type *get_mem_type(unsigned int type)
333 {
334         return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
335 }
336 EXPORT_SYMBOL(get_mem_type);
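/*
 * Illustrative use (not part of this file): ioremap-style callers look up
 * the protection bits for a mapping type before building page tables, e.g.
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *	if (mt)
 *		prot = __pgprot(mt->prot_pte);
 */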
337
338 /*
339  * Adjust the PMD section entries according to the CPU in use.
340  */
341 static void __init build_mem_type_table(void)
342 {
343         struct cachepolicy *cp;
344         unsigned int cr = get_cr();
345         pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
346         pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
347         int cpu_arch = cpu_architecture();
348         int i;
349
350         if (cpu_arch < CPU_ARCH_ARMv6) {
351 #if defined(CONFIG_CPU_DCACHE_DISABLE)
352                 if (cachepolicy > CPOLICY_BUFFERED)
353                         cachepolicy = CPOLICY_BUFFERED;
354 #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
355                 if (cachepolicy > CPOLICY_WRITETHROUGH)
356                         cachepolicy = CPOLICY_WRITETHROUGH;
357 #endif
358         }
359         if (cpu_arch < CPU_ARCH_ARMv5) {
360                 if (cachepolicy >= CPOLICY_WRITEALLOC)
361                         cachepolicy = CPOLICY_WRITEBACK;
362                 ecc_mask = 0;
363         }
364         if (is_smp())
365                 cachepolicy = CPOLICY_WRITEALLOC;
366
367         /*
368          * Strip out features not present on earlier architectures.
369          * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
370          * without extended page tables don't have the 'Shared' bit.
371          */
372         if (cpu_arch < CPU_ARCH_ARMv5)
373                 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
374                         mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
375         if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
376                 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
377                         mem_types[i].prot_sect &= ~PMD_SECT_S;
378
379         /*
380          * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
381          * "update-able on write" bit on ARM610).  However, Xscale and
382          * Xscale3 require this bit to be cleared.
383          */
384         if (cpu_is_xscale() || cpu_is_xsc3()) {
385                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
386                         mem_types[i].prot_sect &= ~PMD_BIT4;
387                         mem_types[i].prot_l1 &= ~PMD_BIT4;
388                 }
389         } else if (cpu_arch < CPU_ARCH_ARMv6) {
390                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
391                         if (mem_types[i].prot_l1)
392                                 mem_types[i].prot_l1 |= PMD_BIT4;
393                         if (mem_types[i].prot_sect)
394                                 mem_types[i].prot_sect |= PMD_BIT4;
395                 }
396         }
397
398         /*
399          * Mark the device areas according to the CPU/architecture.
400          */
401         if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
402                 if (!cpu_is_xsc3()) {
403                         /*
404                          * Mark device regions on ARMv6+ as execute-never
405                          * to prevent speculative instruction fetches.
406                          */
407                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
408                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
409                         mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
410                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
411                 }
412                 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
413                         /*
414                          * For ARMv7 with TEX remapping,
415                          * - shared device is SXCB=1100
416                          * - nonshared device is SXCB=0100
417                          * - write combine device mem is SXCB=0001
418                          * (Uncached Normal memory)
419                          */
420                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
421                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
422                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
423                 } else if (cpu_is_xsc3()) {
424                         /*
425                          * For Xscale3,
426                          * - shared device is TEXCB=00101
427                          * - nonshared device is TEXCB=01000
428                          * - write combine device mem is TEXCB=00100
429                          * (Inner/Outer Uncacheable in xsc3 parlance)
430                          */
431                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
432                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
433                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
434                 } else {
435                         /*
436                          * For ARMv6 and ARMv7 without TEX remapping,
437                          * - shared device is TEXCB=00001
438                          * - nonshared device is TEXCB=01000
439                          * - write combine device mem is TEXCB=00100
440                          * (Uncached Normal in ARMv6 parlance).
441                          */
442                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
443                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
444                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
445                 }
446         } else {
447                 /*
448                  * On others, write combining is "Uncached/Buffered"
449                  */
450                 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
451         }
452
453         /*
454          * Now deal with the memory-type mappings
455          */
456         cp = &cache_policies[cachepolicy];
457         vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
458         s2_pgprot = cp->pte_s2;
459         hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
460
461         /*
462          * We don't use domains on ARMv6 (since this causes problems with
463          * v6/v7 kernels), so we must use a separate memory type for user
464          * r/o, kernel r/w to map the vectors page.
465          */
466 #ifndef CONFIG_ARM_LPAE
467         if (cpu_arch == CPU_ARCH_ARMv6)
468                 vecs_pgprot |= L_PTE_MT_VECTORS;
469 #endif
470
471         /*
472          * ARMv6 and above have extended page tables.
473          */
474         if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
475 #ifndef CONFIG_ARM_LPAE
476                 /*
477                  * Mark cache clean areas and XIP ROM read only
478                  * from SVC mode and no access from userspace.
479                  */
480                 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
481                 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
482                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
483 #endif
484
485                 if (is_smp()) {
486                         /*
487                          * Mark memory with the "shared" attribute
488                          * for SMP systems
489                          */
490                         user_pgprot |= L_PTE_SHARED;
491                         kern_pgprot |= L_PTE_SHARED;
492                         vecs_pgprot |= L_PTE_SHARED;
493                         s2_pgprot |= L_PTE_SHARED;
494                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
495                         mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
496                         mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
497                         mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
498                         mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
499                         mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
500                         mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
501                         mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
502                         mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
503                 }
504         }
505
506         /*
507          * Non-cacheable Normal - intended for memory areas that must
508          * not cause dirty cache line writebacks when used
509          */
510         if (cpu_arch >= CPU_ARCH_ARMv6) {
511                 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
512                         /* Non-cacheable Normal is XCB = 001 */
513                         mem_types[MT_MEMORY_NONCACHED].prot_sect |=
514                                 PMD_SECT_BUFFERED;
515                 } else {
516                         /* For both ARMv6 and non-TEX-remapping ARMv7 */
517                         mem_types[MT_MEMORY_NONCACHED].prot_sect |=
518                                 PMD_SECT_TEX(1);
519                 }
520         } else {
521                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
522         }
523
524 #ifdef CONFIG_ARM_LPAE
525         /*
526          * Do not generate access flag faults for the kernel mappings.
527          */
528         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
529                 mem_types[i].prot_pte |= PTE_EXT_AF;
530                 if (mem_types[i].prot_sect)
531                         mem_types[i].prot_sect |= PMD_SECT_AF;
532         }
533         kern_pgprot |= PTE_EXT_AF;
534         vecs_pgprot |= PTE_EXT_AF;
535 #endif
536
537         for (i = 0; i < 16; i++) {
538                 pteval_t v = pgprot_val(protection_map[i]);
539                 protection_map[i] = __pgprot(v | user_pgprot);
540         }
541
542         mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
543         mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
544
545         pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
546         pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
547                                  L_PTE_DIRTY | kern_pgprot);
548         pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
549         pgprot_s2_device  = __pgprot(s2_device_pgprot);
550         pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
551
552         mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
553         mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
554         mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
555         mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
556         mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
557         mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
558         mem_types[MT_ROM].prot_sect |= cp->pmd;
559
560         switch (cp->pmd) {
561         case PMD_SECT_WT:
562                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
563                 break;
564         case PMD_SECT_WB:
565         case PMD_SECT_WBWA:
566                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
567                 break;
568         }
569         printk(KERN_INFO "Memory policy: ECC %sabled, Data cache %s\n",
570                 ecc_mask ? "en" : "dis", cp->policy);
571
572         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
573                 struct mem_type *t = &mem_types[i];
574                 if (t->prot_l1)
575                         t->prot_l1 |= PMD_DOMAIN(t->domain);
576                 if (t->prot_sect)
577                         t->prot_sect |= PMD_DOMAIN(t->domain);
578         }
579 }
580
581 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
582 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
583                               unsigned long size, pgprot_t vma_prot)
584 {
585         if (!pfn_valid(pfn))
586                 return pgprot_noncached(vma_prot);
587         else if (file->f_flags & O_SYNC)
588                 return pgprot_writecombine(vma_prot);
589         return vma_prot;
590 }
591 EXPORT_SYMBOL(phys_mem_access_prot);
592 #endif
593
594 #define vectors_base()  (vectors_high() ? 0xffff0000 : 0)
595
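/*
 * Boot-time allocators: grab physical memory from memblock and return a
 * zeroed lowmem virtual pointer.  These are only usable before the normal
 * page allocator is up, hence the __init annotations.
 */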
596 static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
597 {
598         void *ptr = __va(memblock_alloc(sz, align));
599         memset(ptr, 0, sz);
600         return ptr;
601 }
602
603 static void __init *early_alloc(unsigned long sz)
604 {
605         return early_alloc_aligned(sz, sz);
606 }
607
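/*
 * Early PTE helpers: early_pte_alloc() returns the existing PTE table for
 * a PMD entry, or allocates a fresh one (Linux and hardware halves);
 * early_pte_install() points the PMD at a PTE table; the alloc-and-install
 * variant below does both and returns the kernel PTE slot for @addr.
 */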
608 static pte_t * __init early_pte_alloc(pmd_t *pmd)
609 {
610         if (pmd_none(*pmd) || pmd_bad(*pmd))
611                 return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
612         return pmd_page_vaddr(*pmd);
613 }
614
615 static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
616 {
617         __pmd_populate(pmd, __pa(pte), prot);
618         BUG_ON(pmd_bad(*pmd));
619 }
620
621 static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
622         unsigned long addr, unsigned long prot)
623 {
624         if (pmd_none(*pmd)) {
625                 pte_t *pte = early_pte_alloc(pmd);
626                 early_pte_install(pmd, pte, prot);
627         }
628         BUG_ON(pmd_bad(*pmd));
629         return pte_offset_kernel(pmd, addr);
630 }
631
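/*
 * alloc_init_pte() fills one PMD's worth of PTEs for [addr, end) with the
 * given memory type, starting at @pfn, then installs the PTE table into
 * the PMD.  An existing section mapping may only be replaced whole, hence
 * the BUG_ON below.
 */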
632 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
633                                   unsigned long end, unsigned long pfn,
634                                   const struct mem_type *type)
635 {
636         pte_t *start_pte = early_pte_alloc(pmd);
637         pte_t *pte = start_pte + pte_index(addr);
638
639         /* If replacing a section mapping, the whole section must be replaced */
640         BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
641
642         do {
643                 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
644                 pfn++;
645         } while (pte++, addr += PAGE_SIZE, addr != end);
646         early_pte_install(pmd, start_pte, type->prot_l1);
647 }
648
649 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
650                         unsigned long end, phys_addr_t phys,
651                         const struct mem_type *type)
652 {
653         pmd_t *p = pmd;
654
655 #ifndef CONFIG_ARM_LPAE
656         /*
657          * In classic MMU format, puds and pmds are folded into
658          * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
659          * group of L1 entries making up one logical pointer to
660          * an L2 table (2MB), whereas PMDs refer to the individual
661          * L1 entries (1MB). Hence increment to get the correct
662          * offset for odd 1MB sections.
663          * (See arch/arm/include/asm/pgtable-2level.h)
664          */
665         if (addr & SECTION_SIZE)
666                 pmd++;
667 #endif
668         do {
669                 *pmd = __pmd(phys | type->prot_sect);
670                 phys += SECTION_SIZE;
671         } while (pmd++, addr += SECTION_SIZE, addr != end);
672
673         flush_pmd_entry(p);
674 }
675
676 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
677                                       unsigned long end, phys_addr_t phys,
678                                       const struct mem_type *type,
679                                       bool force_pages)
680 {
681         pmd_t *pmd = pmd_offset(pud, addr);
682         unsigned long next;
683
684         do {
685                 /*
686                  * With LPAE, we must loop over to map
687                  * all the pmds for the given range.
688                  */
689                 next = pmd_addr_end(addr, end);
690
691                 /*
692                  * Try a section mapping - addr, next and phys must all be
693                  * aligned to a section boundary.
694                  */
695                 if (type->prot_sect &&
696                                 ((addr | next | phys) & ~SECTION_MASK) == 0 &&
697                                 !force_pages) {
698                         __map_init_section(pmd, addr, next, phys, type);
699                 } else {
700                         alloc_init_pte(pmd, addr, next,
701                                                 __phys_to_pfn(phys), type);
702                 }
703
704                 phys += next - addr;
705
706         } while (pmd++, addr = next, addr != end);
707 }
708
709 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
710                                   unsigned long end, phys_addr_t phys,
711                                   const struct mem_type *type,
712                                   bool force_pages)
713 {
714         pud_t *pud = pud_offset(pgd, addr);
715         unsigned long next;
716
717         do {
718                 next = pud_addr_end(addr, end);
719                 alloc_init_pmd(pud, addr, next, phys, type, force_pages);
720                 phys += next - addr;
721         } while (pud++, addr = next, addr != end);
722 }
723
724 #ifndef CONFIG_ARM_LPAE
725 static void __init create_36bit_mapping(struct map_desc *md,
726                                         const struct mem_type *type)
727 {
728         unsigned long addr, length, end;
729         phys_addr_t phys;
730         pgd_t *pgd;
731
732         addr = md->virtual;
733         phys = __pfn_to_phys(md->pfn);
734         length = PAGE_ALIGN(md->length);
735
736         if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
737                 printk(KERN_ERR "MM: CPU does not support supersection "
738                        "mapping for 0x%08llx at 0x%08lx\n",
739                        (long long)__pfn_to_phys((u64)md->pfn), addr);
740                 return;
741         }
742
743         /* N.B. ARMv6 supersections are only defined to work with domain 0.
744          *      Since domain assignments can in fact be arbitrary, the
745          *      'domain == 0' check below is required to ensure that ARMv6
746          *      supersections are only allocated for domain 0 regardless
747          *      of the actual domain assignments in use.
748          */
749         if (type->domain) {
750                 printk(KERN_ERR "MM: invalid domain in supersection "
751                        "mapping for 0x%08llx at 0x%08lx\n",
752                        (long long)__pfn_to_phys((u64)md->pfn), addr);
753                 return;
754         }
755
756         if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
757                 printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
758                        " at 0x%08lx invalid alignment\n",
759                        (long long)__pfn_to_phys((u64)md->pfn), addr);
760                 return;
761         }
762
763         /*
764          * Shift bits [35:32] of address into bits [23:20] of PMD
765          * (See ARMv6 spec).
766          */
767         phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
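	/*
	 * Worked example (illustrative, assuming 4K pages, PAGE_SHIFT == 12):
	 * md->pfn == 0x110000 corresponds to physical 0x1_1000_0000, so
	 * md->pfn >> 20 == 0x1; masked and shifted this ORs in 0x00100000,
	 * placing physical address bit 32 into bit 20 of the supersection
	 * descriptor.
	 */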
768
769         pgd = pgd_offset_k(addr);
770         end = addr + length;
771         do {
772                 pud_t *pud = pud_offset(pgd, addr);
773                 pmd_t *pmd = pmd_offset(pud, addr);
774                 int i;
775
776                 for (i = 0; i < 16; i++)
777                         *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
778
779                 addr += SUPERSECTION_SIZE;
780                 phys += SUPERSECTION_SIZE;
781                 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
782         } while (addr != end);
783 }
784 #endif  /* !CONFIG_ARM_LPAE */
785
786 /*
787  * Create the page directory entries and any necessary
788  * page tables for the mapping specified by `md'.  We
789  * are able to cope here with varying sizes and address
790  * offsets, and we take full advantage of sections and
791  * supersections.
792  */
793 static void __init create_mapping(struct map_desc *md, bool force_pages)
794 {
795         unsigned long addr, length, end;
796         phys_addr_t phys;
797         const struct mem_type *type;
798         pgd_t *pgd;
799
800         if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
801                 printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
802                        " at 0x%08lx in user region\n",
803                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
804                 return;
805         }
806
807         if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
808             md->virtual >= PAGE_OFFSET &&
809             (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
810                 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
811                        " at 0x%08lx out of vmalloc space\n",
812                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
813         }
814
815         type = &mem_types[md->type];
816
817 #ifndef CONFIG_ARM_LPAE
818         /*
819          * Catch 36-bit addresses
820          */
821         if (md->pfn >= 0x100000) {
822                 create_36bit_mapping(md, type);
823                 return;
824         }
825 #endif
826
827         addr = md->virtual & PAGE_MASK;
828         phys = __pfn_to_phys(md->pfn);
829         length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
830
831         if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
832                 printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx cannot "
833                        "be mapped using pages, ignoring.\n",
834                        (long long)__pfn_to_phys(md->pfn), addr);
835                 return;
836         }
837
838         pgd = pgd_offset_k(addr);
839         end = addr + length;
840         do {
841                 unsigned long next = pgd_addr_end(addr, end);
842
843                 alloc_init_pud(pgd, addr, next, phys, type, force_pages);
844
845                 phys += next - addr;
846                 addr = next;
847         } while (pgd++, addr != end);
848 }
849
850 /*
851  * Create the architecture specific mappings
852  */
853 void __init iotable_init(struct map_desc *io_desc, int nr)
854 {
855         struct map_desc *md;
856         struct vm_struct *vm;
857         struct static_vm *svm;
858
859         if (!nr)
860                 return;
861
862         svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
863
864         for (md = io_desc; nr; md++, nr--) {
865                 create_mapping(md, false);
866
867                 vm = &svm->vm;
868                 vm->addr = (void *)(md->virtual & PAGE_MASK);
869                 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
870                 vm->phys_addr = __pfn_to_phys(md->pfn);
871                 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
872                 vm->flags |= VM_ARM_MTYPE(md->type);
873                 vm->caller = iotable_init;
874                 add_static_vm_early(svm++);
875         }
876 }
877
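/*
 * Reserve a range of the vmalloc address space early, before vmalloc is
 * initialised, so that later ioremap()/vmalloc() calls cannot hand out
 * the same virtual range.  Used for the PCI I/O space and for the dummy
 * entries inserted by pmd_empty_section_gap() below.
 */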
878 void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
879                                   void *caller)
880 {
881         struct vm_struct *vm;
882         struct static_vm *svm;
883
884         svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
885
886         vm = &svm->vm;
887         vm->addr = (void *)addr;
888         vm->size = size;
889         vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
890         vm->caller = caller;
891         add_static_vm_early(svm);
892 }
893
894 #ifndef CONFIG_ARM_LPAE
895
896 /*
897  * The Linux PMD is made of two consecutive section entries covering 2MB
898  * (see definition in include/asm/pgtable-2level.h).  However a call to
899  * create_mapping() may optimize static mappings by using individual
900  * 1MB section mappings.  This leaves the actual PMD potentially half
901  * initialized if the top or bottom section entry isn't used, leaving it
902  * open to problems if a subsequent ioremap() or vmalloc() tries to use
903  * the virtual space left free by that unused section entry.
904  *
905  * Let's avoid the issue by inserting dummy vm entries covering the unused
906  * PMD halves once the static mappings are in place.
907  */
908
909 static void __init pmd_empty_section_gap(unsigned long addr)
910 {
911         vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
912 }
913
914 static void __init fill_pmd_gaps(void)
915 {
916         struct static_vm *svm;
917         struct vm_struct *vm;
918         unsigned long addr, next = 0;
919         pmd_t *pmd;
920
921         list_for_each_entry(svm, &static_vmlist, list) {
922                 vm = &svm->vm;
923                 addr = (unsigned long)vm->addr;
924                 if (addr < next)
925                         continue;
926
927                 /*
928                  * Check if this vm starts on an odd section boundary.
929                  * If so and the first section entry for this PMD is free
930                  * then we block the corresponding virtual address.
931                  */
932                 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
933                         pmd = pmd_off_k(addr);
934                         if (pmd_none(*pmd))
935                                 pmd_empty_section_gap(addr & PMD_MASK);
936                 }
937
938                 /*
939                  * Then check if this vm ends on an odd section boundary.
940                  * If so and the second section entry for this PMD is empty
941                  * then we block the corresponding virtual address.
942                  */
943                 addr += vm->size;
944                 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
945                         pmd = pmd_off_k(addr) + 1;
946                         if (pmd_none(*pmd))
947                                 pmd_empty_section_gap(addr);
948                 }
949
950                 /* no need to look at any vm entry until we hit the next PMD */
951                 next = (addr + PMD_SIZE - 1) & PMD_MASK;
952         }
953 }
954
955 #else
956 #define fill_pmd_gaps() do { } while (0)
957 #endif
958
959 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
960 static void __init pci_reserve_io(void)
961 {
962         struct static_vm *svm;
963
964         svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
965         if (svm)
966                 return;
967
968         vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
969 }
970 #else
971 #define pci_reserve_io() do { } while (0)
972 #endif
973
974 #ifdef CONFIG_DEBUG_LL
975 void __init debug_ll_io_init(void)
976 {
977         struct map_desc map;
978
979         debug_ll_addr(&map.pfn, &map.virtual);
980         if (!map.pfn || !map.virtual)
981                 return;
982         map.pfn = __phys_to_pfn(map.pfn);
983         map.virtual &= PAGE_MASK;
984         map.length = PAGE_SIZE;
985         map.type = MT_DEVICE;
986         create_mapping(&map, false);
987 }
988 #endif
989
990 static void * __initdata vmalloc_min =
991         (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
992
993 /*
994  * vmalloc=size forces the vmalloc area to be exactly 'size'
995  * bytes. This can be used to increase (or decrease) the vmalloc
996  * area - the default is 240m.
997  */
998 static int __init early_vmalloc(char *arg)
999 {
1000         unsigned long vmalloc_reserve = memparse(arg, NULL);
1001
1002         if (vmalloc_reserve < SZ_16M) {
1003                 vmalloc_reserve = SZ_16M;
1004                 printk(KERN_WARNING
1005                         "vmalloc area too small, limiting to %luMB\n",
1006                         vmalloc_reserve >> 20);
1007         }
1008
1009         if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
1010                 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
1011                 printk(KERN_WARNING
1012                         "vmalloc area is too big, limiting to %luMB\n",
1013                         vmalloc_reserve >> 20);
1014         }
1015
1016         vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
1017         return 0;
1018 }
1019 early_param("vmalloc", early_vmalloc);
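/*
 * Illustrative usage (not from the original source): booting with
 * "vmalloc=384M" sets vmalloc_min to VMALLOC_END - 384MB, trading lowmem
 * for a larger vmalloc area.  Values are clamped so the area is at least
 * 16MB and at least 32MB of lowmem remains above PAGE_OFFSET.
 */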
1020
1021 phys_addr_t arm_lowmem_limit __initdata = 0;
1022
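/*
 * Walk the meminfo bank array and make it consistent with the lowmem /
 * vmalloc split: split banks that straddle a sparsemem section or the
 * vmalloc boundary, mark (or drop/truncate) highmem banks, and record the
 * highest lowmem physical address in arm_lowmem_limit.
 */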
1023 void __init sanity_check_meminfo(void)
1024 {
1025         int i, j, highmem = 0;
1026
1027         for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
1028                 struct membank *bank = &meminfo.bank[j];
1029                 *bank = meminfo.bank[i];
1030
1031 #ifdef CONFIG_SPARSEMEM
1032                 if (pfn_to_section_nr(bank_pfn_start(bank)) !=
1033                     pfn_to_section_nr(bank_pfn_end(bank) - 1)) {
1034                         phys_addr_t sz;
1035                         unsigned long start_pfn = bank_pfn_start(bank);
1036                         unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1037                         sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT);
1038
1039                         if (meminfo.nr_banks >= NR_BANKS) {
1040                                 pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n",
1041                                         (unsigned long long)(bank->size - sz));
1042                         } else {
1043                                 memmove(bank + 1, bank,
1044                                         (meminfo.nr_banks - i) * sizeof(*bank));
1045                                 meminfo.nr_banks++;
1046                                 bank[1].size -= sz;
1047                                 bank[1].start = __pfn_to_phys(end_pfn);
1048                         }
1049                         bank->size = sz;
1050                 }
1051 #endif
1052
1053                 if (bank->start > ULONG_MAX)
1054                         highmem = 1;
1055
1056 #ifdef CONFIG_HIGHMEM
1057                 if (__va(bank->start) >= vmalloc_min ||
1058                     __va(bank->start) < (void *)PAGE_OFFSET)
1059                         highmem = 1;
1060
1061                 bank->highmem = highmem;
1062
1063                 /*
1064                  * Split those memory banks which are partially overlapping
1065                  * the vmalloc area, greatly simplifying things later.
1066                  */
1067                 if (!highmem && __va(bank->start) < vmalloc_min &&
1068                     bank->size > vmalloc_min - __va(bank->start)) {
1069                         if (meminfo.nr_banks >= NR_BANKS) {
1070                                 printk(KERN_CRIT "NR_BANKS too low, "
1071                                                  "ignoring high memory\n");
1072                         } else {
1073                                 memmove(bank + 1, bank,
1074                                         (meminfo.nr_banks - i) * sizeof(*bank));
1075                                 meminfo.nr_banks++;
1076                                 i++;
1077                                 bank[1].size -= vmalloc_min - __va(bank->start);
1078                                 bank[1].start = __pa(vmalloc_min - 1) + 1;
1079                                 bank[1].highmem = highmem = 1;
1080                                 j++;
1081                         }
1082                         bank->size = vmalloc_min - __va(bank->start);
1083                 }
1084 #else
1085                 bank->highmem = highmem;
1086
1087                 /*
1088                  * Highmem banks not allowed with !CONFIG_HIGHMEM.
1089                  */
1090                 if (highmem) {
1091                         printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1092                                "(!CONFIG_HIGHMEM).\n",
1093                                (unsigned long long)bank->start,
1094                                (unsigned long long)bank->start + bank->size - 1);
1095                         continue;
1096                 }
1097
1098                 /*
1099                  * Check whether this memory bank would entirely overlap
1100                  * the vmalloc area.
1101                  */
1102                 if (__va(bank->start) >= vmalloc_min ||
1103                     __va(bank->start) < (void *)PAGE_OFFSET) {
1104                         printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1105                                "(vmalloc region overlap).\n",
1106                                (unsigned long long)bank->start,
1107                                (unsigned long long)bank->start + bank->size - 1);
1108                         continue;
1109                 }
1110
1111                 /*
1112                  * Check whether this memory bank would partially overlap
1113                  * the vmalloc area.
1114                  */
1115                 if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
1116                     __va(bank->start + bank->size - 1) <= __va(bank->start)) {
1117                         unsigned long newsize = vmalloc_min - __va(bank->start);
1118                         printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
1119                                "to -%.8llx (vmalloc region overlap).\n",
1120                                (unsigned long long)bank->start,
1121                                (unsigned long long)bank->start + bank->size - 1,
1122                                (unsigned long long)bank->start + newsize - 1);
1123                         bank->size = newsize;
1124                 }
1125 #endif
1126                 if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
1127                         arm_lowmem_limit = bank->start + bank->size;
1128
1129                 j++;
1130         }
1131 #ifdef CONFIG_HIGHMEM
1132         if (highmem) {
1133                 const char *reason = NULL;
1134
1135                 if (cache_is_vipt_aliasing()) {
1136                         /*
1137                          * Interactions between kmap and other mappings
1138                          * make highmem support with aliasing VIPT caches
1139                          * rather difficult.
1140                          */
1141                         reason = "with VIPT aliasing cache";
1142                 }
1143                 if (reason) {
1144                         printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
1145                                 reason);
1146                         while (j > 0 && meminfo.bank[j - 1].highmem)
1147                                 j--;
1148                 }
1149         }
1150 #endif
1151         meminfo.nr_banks = j;
1152         high_memory = __va(arm_lowmem_limit - 1) + 1;
1153         memblock_set_current_limit(arm_lowmem_limit);
1154 }
1155
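/*
 * Clear out the boot-time section mappings that are no longer wanted:
 * everything below PAGE_OFFSET (skipping an XIP kernel's text), and
 * everything between the end of the first lowmem block and VMALLOC_START.
 * The lowmem and device mappings are recreated afterwards by map_lowmem()
 * and devicemaps_init().
 */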
1156 static inline void prepare_page_table(void)
1157 {
1158         unsigned long addr;
1159         phys_addr_t end;
1160
1161         /*
1162          * Clear out all the mappings below the kernel image.
1163          */
1164         for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1165                 pmd_clear(pmd_off_k(addr));
1166
1167 #ifdef CONFIG_XIP_KERNEL
1168         /* The XIP kernel is mapped in the module area -- skip over it */
1169         addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
1170 #endif
1171         for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1172                 pmd_clear(pmd_off_k(addr));
1173
1174         /*
1175          * Find the end of the first block of lowmem.
1176          */
1177         end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1178         if (end >= arm_lowmem_limit)
1179                 end = arm_lowmem_limit;
1180
1181         /*
1182          * Clear out all the kernel space mappings, except for the first
1183          * memory bank, up to the vmalloc region.
1184          */
1185         for (addr = __phys_to_virt(end);
1186              addr < VMALLOC_START; addr += PMD_SIZE)
1187                 pmd_clear(pmd_off_k(addr));
1188 }
1189
1190 #ifdef CONFIG_ARM_LPAE
1191 /* the first page is reserved for pgd */
1192 #define SWAPPER_PG_DIR_SIZE     (PAGE_SIZE + \
1193                                  PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1194 #else
1195 #define SWAPPER_PG_DIR_SIZE     (PTRS_PER_PGD * sizeof(pgd_t))
1196 #endif
1197
1198 /*
1199  * Reserve the special regions of memory
1200  */
1201 void __init arm_mm_memblock_reserve(void)
1202 {
1203         /*
1204          * Reserve the page tables.  These are already in use,
1205          * and can only be in node 0.
1206          */
1207         memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1208
1209 #ifdef CONFIG_SA1111
1210         /*
1211          * Because of the SA1111 DMA bug, we want to preserve our
1212          * precious DMA-able memory...
1213          */
1214         memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1215 #endif
1216 }
1217
1218 /*
1219  * Set up the device mappings.  Since we clear out the page tables for all
1220  * mappings above VMALLOC_START, we will remove any debug device mappings.
1221  * This means you have to be careful how you debug this function, or any
1222  * called function: you can't use any function or debugging
1223  * method which may touch any device, otherwise the kernel _will_ crash.
1224  */
1225 static void __init devicemaps_init(struct machine_desc *mdesc)
1226 {
1227         struct map_desc map;
1228         unsigned long addr;
1229         void *vectors;
1230
1231         /*
1232          * Allocate the vector page early.
1233          */
1234         vectors = early_alloc(PAGE_SIZE * 2);
1235
1236         early_trap_init(vectors);
1237
1238         for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
1239                 pmd_clear(pmd_off_k(addr));
1240
1241         /*
1242          * Map the kernel if it is XIP.
1243          * It is always first in the module area.
1244          */
1245 #ifdef CONFIG_XIP_KERNEL
1246         map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1247         map.virtual = MODULES_VADDR;
1248         map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1249         map.type = MT_ROM;
1250         create_mapping(&map, false);
1251 #endif
1252
1253         /*
1254          * Map the cache flushing regions.
1255          */
1256 #ifdef FLUSH_BASE
1257         map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1258         map.virtual = FLUSH_BASE;
1259         map.length = SZ_1M;
1260         map.type = MT_CACHECLEAN;
1261         create_mapping(&map, false);
1262 #endif
1263 #ifdef FLUSH_BASE_MINICACHE
1264         map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1265         map.virtual = FLUSH_BASE_MINICACHE;
1266         map.length = SZ_1M;
1267         map.type = MT_MINICLEAN;
1268         create_mapping(&map, false);
1269 #endif
1270
1271         /*
1272          * Create a mapping for the machine vectors at the high-vectors
1273          * location (0xffff0000).  If we aren't using high-vectors, also
1274          * create a mapping at the low-vectors virtual address.
1275          */
1276         map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1277         map.virtual = 0xffff0000;
1278         map.length = PAGE_SIZE;
1279 #ifdef CONFIG_KUSER_HELPERS
1280         map.type = MT_HIGH_VECTORS;
1281 #else
1282         map.type = MT_LOW_VECTORS;
1283 #endif
1284         create_mapping(&map, false);
1285
1286         if (!vectors_high()) {
1287                 map.virtual = 0;
1288                 map.length = PAGE_SIZE * 2;
1289                 map.type = MT_LOW_VECTORS;
1290                 create_mapping(&map, false);
1291         }
1292
1293         /* Now create a kernel read-only mapping */
1294         map.pfn += 1;
1295         map.virtual = 0xffff0000 + PAGE_SIZE;
1296         map.length = PAGE_SIZE;
1297         map.type = MT_LOW_VECTORS;
1298         create_mapping(&map, false);
1299
1300         /*
1301          * Ask the machine support to map in the statically mapped devices.
1302          */
1303         if (mdesc->map_io)
1304                 mdesc->map_io();
1305         fill_pmd_gaps();
1306
1307         /* Reserve fixed i/o space in VMALLOC region */
1308         pci_reserve_io();
1309
1310         /*
1311          * Finally flush the caches and tlb to ensure that we're in a
1312          * consistent state wrt the writebuffer.  This also ensures that
1313          * any write-allocated cache lines in the vector page are written
1314          * back.  After this point, we can start to touch devices again.
1315          */
1316         local_flush_tlb_all();
1317         flush_cache_all();
1318 }
1319
1320 static void __init kmap_init(void)
1321 {
1322 #ifdef CONFIG_HIGHMEM
1323         pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
1324                 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1325 #endif
1326 }
1327
1328
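/*
 * Create the kernel's linear (lowmem) mapping: one MT_MEMORY mapping per
 * memblock region below arm_lowmem_limit.  With CONFIG_DEBUG_RODATA the
 * kernel text/rodata range is mapped again with page granularity so its
 * permissions can later be changed without splitting section mappings.
 */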
1329 static void __init map_lowmem(void)
1330 {
1331         struct memblock_region *reg;
1332         phys_addr_t start;
1333         phys_addr_t end;
1334         struct map_desc map;
1335
1336         /* Map all the lowmem memory banks. */
1337         for_each_memblock(memory, reg) {
1338                 start = reg->base;
1339                 end = start + reg->size;
1340
1341                 if (end > arm_lowmem_limit)
1342                         end = arm_lowmem_limit;
1343                 if (start >= end)
1344                         break;
1345
1346                 map.pfn = __phys_to_pfn(start);
1347                 map.virtual = __phys_to_virt(start);
1348                 map.length = end - start;
1349                 map.type = MT_MEMORY;
1350
1351                 create_mapping(&map, false);
1352         }
1353
1354 #ifdef CONFIG_DEBUG_RODATA
1355         start = __pa(_stext) & PMD_MASK;
1356         end = ALIGN(__pa(__end_rodata), PMD_SIZE);
1357
1358         map.pfn = __phys_to_pfn(start);
1359         map.virtual = __phys_to_virt(start);
1360         map.length = end - start;
1361         map.type = MT_MEMORY;
1362
1363         create_mapping(&map, true);
1364 #endif
1365 }
1366
1367 /*
1368  * paging_init() sets up the page tables, initialises the zone memory
1369  * maps, and sets up the zero page, bad page and bad page tables.
1370  */
1371 void __init paging_init(struct machine_desc *mdesc)
1372 {
1373         void *zero_page;
1374
1375         memblock_set_current_limit(arm_lowmem_limit);
1376
1377         build_mem_type_table();
1378         prepare_page_table();
1379         map_lowmem();
1380         dma_contiguous_remap();
1381         devicemaps_init(mdesc);
1382         kmap_init();
1383         tcm_init();
1384
1385         top_pmd = pmd_off_k(0xffff0000);
1386
1387         /* allocate the zero page. */
1388         zero_page = early_alloc(PAGE_SIZE);
1389
1390         bootmem_init();
1391
1392         empty_zero_page = virt_to_page(zero_page);
1393         __flush_dcache_page(NULL, empty_zero_page);
1394 }