/*
 * Merge branch 'for-lsk' of git://git.linaro.org/arm/big.LITTLE/mp into linux-linaro-lsk
 * [firefly-linux-kernel-4.4.55.git] / arch / arm / kernel / setup.c
 */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_platform.h>
22 #include <linux/init.h>
23 #include <linux/kexec.h>
24 #include <linux/of_fdt.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/proc_fs.h>
29 #include <linux/memblock.h>
30 #include <linux/bug.h>
31 #include <linux/compiler.h>
32 #include <linux/sort.h>
33
34 #include <asm/unified.h>
35 #include <asm/cp15.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/psci.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58 #include <asm/virt.h>
59
60 #include "atags.h"
61
62
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

/*
 * Parse the "fpe=" kernel command line option.
 *
 * Records the requested floating point emulator name (e.g. "nwfpe",
 * "fastfpe") in fpe_type for the FP emulation code to inspect later.
 * Returns 1 to tell the __setup machinery the option was consumed.
 */
static int __init fpe_setup(char *line)
{
	/*
	 * Copy at most sizeof(fpe_type) - 1 characters and always
	 * NUL-terminate.  The previous memcpy(..., 8) unconditionally
	 * read 8 bytes from "line", over-reading past the end of the
	 * command-line token when the option was shorter than that.
	 */
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
74
75 extern void paging_init(struct machine_desc *desc);
76 extern void sanity_check_meminfo(void);
77 extern void reboot_setup(char *str);
78 extern void setup_dma_zone(struct machine_desc *desc);
79
80 unsigned int processor_id;
81 EXPORT_SYMBOL(processor_id);
82 unsigned int __machine_arch_type __read_mostly;
83 EXPORT_SYMBOL(__machine_arch_type);
84 unsigned int cacheid __read_mostly;
85 EXPORT_SYMBOL(cacheid);
86
87 unsigned int __atags_pointer __initdata;
88
89 unsigned int system_rev;
90 EXPORT_SYMBOL(system_rev);
91
92 unsigned int system_serial_low;
93 EXPORT_SYMBOL(system_serial_low);
94
95 unsigned int system_serial_high;
96 EXPORT_SYMBOL(system_serial_high);
97
98 unsigned int elf_hwcap __read_mostly;
99 EXPORT_SYMBOL(elf_hwcap);
100
101
102 #ifdef MULTI_CPU
103 struct processor processor __read_mostly;
104 #endif
105 #ifdef MULTI_TLB
106 struct cpu_tlb_fns cpu_tlb __read_mostly;
107 #endif
108 #ifdef MULTI_USER
109 struct cpu_user_fns cpu_user __read_mostly;
110 #endif
111 #ifdef MULTI_CACHE
112 struct cpu_cache_fns cpu_cache __read_mostly;
113 #endif
114 #ifdef CONFIG_OUTER_CACHE
115 struct outer_cache_fns outer_cache __read_mostly;
116 EXPORT_SYMBOL(outer_cache);
117 #endif
118
119 /*
120  * Cached cpu_architecture() result for use by assembler code.
121  * C code should use the cpu_architecture() function instead of accessing this
122  * variable directly.
123  */
124 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
125
126 struct stack {
127         u32 irq[3];
128         u32 abt[3];
129         u32 und[3];
130 } ____cacheline_aligned;
131
132 static struct stack stacks[NR_CPUS];
133
134 char elf_platform[ELF_PLATFORM_SIZE];
135 EXPORT_SYMBOL(elf_platform);
136
137 static const char *cpu_name;
138 static const char *machine_name;
139 static char __initdata cmd_line[COMMAND_LINE_SIZE];
140 struct machine_desc *machine_desc __initdata;
141
142 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
143 #define ENDIANNESS ((char)endian_test.l)
144
145 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
146
147 /*
148  * Standard memory resources
149  */
150 static struct resource mem_res[] = {
151         {
152                 .name = "Video RAM",
153                 .start = 0,
154                 .end = 0,
155                 .flags = IORESOURCE_MEM
156         },
157         {
158                 .name = "Kernel code",
159                 .start = 0,
160                 .end = 0,
161                 .flags = IORESOURCE_MEM
162         },
163         {
164                 .name = "Kernel data",
165                 .start = 0,
166                 .end = 0,
167                 .flags = IORESOURCE_MEM
168         }
169 };
170
171 #define video_ram   mem_res[0]
172 #define kernel_code mem_res[1]
173 #define kernel_data mem_res[2]
174
175 static struct resource io_res[] = {
176         {
177                 .name = "reserved",
178                 .start = 0x3bc,
179                 .end = 0x3be,
180                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
181         },
182         {
183                 .name = "reserved",
184                 .start = 0x378,
185                 .end = 0x37f,
186                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
187         },
188         {
189                 .name = "reserved",
190                 .start = 0x278,
191                 .end = 0x27f,
192                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
193         }
194 };
195
196 #define lp0 io_res[0]
197 #define lp1 io_res[1]
198 #define lp2 io_res[2]
199
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture() (see __get_cpu_architecture()); used
 * by the boot banner and /proc/cpuinfo.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
219
220 static int __get_cpu_architecture(void)
221 {
222         int cpu_arch;
223
224         if ((read_cpuid_id() & 0x0008f000) == 0) {
225                 cpu_arch = CPU_ARCH_UNKNOWN;
226         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
227                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
228         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
229                 cpu_arch = (read_cpuid_id() >> 16) & 7;
230                 if (cpu_arch)
231                         cpu_arch += CPU_ARCH_ARMv3;
232         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
233                 unsigned int mmfr0;
234
235                 /* Revised CPUID format. Read the Memory Model Feature
236                  * Register 0 and check for VMSAv7 or PMSAv7 */
237                 asm("mrc        p15, 0, %0, c0, c1, 4"
238                     : "=r" (mmfr0));
239                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
240                     (mmfr0 & 0x000000f0) >= 0x00000030)
241                         cpu_arch = CPU_ARCH_ARMv7;
242                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
243                          (mmfr0 & 0x000000f0) == 0x00000020)
244                         cpu_arch = CPU_ARCH_ARMv6;
245                 else
246                         cpu_arch = CPU_ARCH_UNKNOWN;
247         } else
248                 cpu_arch = CPU_ARCH_UNKNOWN;
249
250         return cpu_arch;
251 }
252
253 int __pure cpu_architecture(void)
254 {
255         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
256
257         return __cpu_architecture;
258 }
259
/*
 * cpu_has_aliasing_icache - decide whether the I-cache can alias.
 *
 * Returns non-zero when one I-cache way spans more than a page, so a
 * physical page may be cached under more than one virtual index.
 * @arch selects how the cache identification registers are decoded.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

#ifdef CONFIG_BIG_LITTLE
	/*
	 * We expect a combination of Cortex-A15 and Cortex-A7 cores.
	 * A7 = VIPT aliasing I-cache
	 * A15 = PIPT (non-aliasing) I-cache
	 * To cater for this discrepancy, let's assume aliasing I-cache
	 * all the time.  This means unneeded extra work on the A15 but
	 * only ptrace is affected which is not performance critical.
	 */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
		return 1;
#endif

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the level 1 instruction cache in CSSELR, then
		 * read its size description back from CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		/* CCSIDR: line size (log2 words - 2) in [2:0], sets-1 in [27:13] */
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* one way larger than a page => virtual index bits alias */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* bit 11 of the ARMv6 cache type register flags an
		 * aliasing instruction cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
305
/*
 * cacheid_init - classify the CPU's cache indexing/tagging scheme.
 *
 * Fills in the global "cacheid" bitmask (VIVT / VIPT aliasing or not /
 * PIPT, plus the I-cache variants) from the cache type register, then
 * prints the classification.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* bits [15:14] describe the L1 I-cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 format: bit 23 flags an aliasing D-cache */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		/* The I-cache may alias even when the D-cache does not */
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* pre-v6 cores: virtually indexed, virtually tagged */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
347
348 /*
349  * These functions re-use the assembly code in head.S, which
350  * already provide the required functionality.
351  */
352 extern struct proc_info_list *lookup_processor_type(unsigned int);
353
354 void __init early_print(const char *str, ...)
355 {
356         extern void printascii(const char *);
357         char buf[256];
358         va_list ap;
359
360         va_start(ap, str);
361         vsnprintf(buf, sizeof(buf), str, ap);
362         va_end(ap);
363
364 #ifdef CONFIG_DEBUG_LL
365         printascii(buf);
366 #endif
367         printk("%s", buf);
368 }
369
/*
 * cpuid_init_hwcaps - derive extra hwcap bits from the CPUID registers.
 *
 * On ARMv7+, reads the divide-instruction field (bits [27:24]) of
 * ID_ISAR0 and sets HWCAP_IDIVA/HWCAP_IDIVT accordingly: value 2 means
 * SDIV/UDIV exist in both ARM and Thumb, value 1 in Thumb only.
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-state divide implies Thumb divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
386
387 static void __init feat_v6_fixup(void)
388 {
389         int id = read_cpuid_id();
390
391         if ((id & 0xff0f0000) != 0x41070000)
392                 return;
393
394         /*
395          * HWCAP_TLS is available only on 1136 r1p0 and later,
396          * see also kuser_get_tls_init.
397          */
398         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
399                 elf_hwcap &= ~HWCAP_TLS;
400 }
401
402 /*
403  * cpu_init - initialise one CPU.
404  *
405  * cpu_init sets up the per-CPU stacks.
406  */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, ABT and UND mode in turn: switch to the mode
	 * with IRQ/FIQ masked, point sp at the matching three-word
	 * per-cpu stack in "stk", then finally return to SVC mode.
	 * r14 is used as scratch, hence the clobber.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
460
461 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
462
463 void __init smp_setup_processor_id(void)
464 {
465         int i;
466         u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
467         u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
468
469         cpu_logical_map(0) = cpu;
470         for (i = 1; i < nr_cpu_ids; ++i)
471                 cpu_logical_map(i) = i == cpu ? 0 : i;
472
473         printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
474 }
475
/*
 * setup_processor - identify the CPU and install its method tables.
 *
 * Looks the MIDR up in the table built from arch/arm/mm/proc-*.S,
 * copies the per-CPU-type processor/tlb/user/cache function structures,
 * initialises elf_hwcap and the utsname/elf_platform strings, then runs
 * cache classification and exception-stack setup.  Spins forever if the
 * running CPU is not in the table, since nothing further can work.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the CPU-type-specific method tables for multi-type kernels */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the machine/platform names */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	/* kernel built without Thumb support: hide the Thumb hwcaps */
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
529
530 void __init dump_machine_table(void)
531 {
532         struct machine_desc *p;
533
534         early_print("Available machine support:\n\nID (hex)\tNAME\n");
535         for_each_machine_desc(p)
536                 early_print("%08x\t%s\n", p->nr, p->name);
537
538         early_print("\nPlease check your kernel config and/or bootloader.\n");
539
540         while (true)
541                 /* can't use cpu_relax() here as it may require MMU setup */;
542 }
543
544 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
545 {
546         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
547         u64 aligned_start;
548
549         if (meminfo.nr_banks >= NR_BANKS) {
550                 printk(KERN_CRIT "NR_BANKS too low, "
551                         "ignoring memory at 0x%08llx\n", (long long)start);
552                 return -EINVAL;
553         }
554
555         /*
556          * Ensure that start/size are aligned to a page boundary.
557          * Size is appropriately rounded down, start is rounded up.
558          */
559         size -= start & ~PAGE_MASK;
560         aligned_start = PAGE_ALIGN(start);
561
562 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
563         if (aligned_start > ULONG_MAX) {
564                 printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
565                        "32-bit physical address space\n", (long long)start);
566                 return -EINVAL;
567         }
568
569         if (aligned_start + size > ULONG_MAX) {
570                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
571                         "32-bit physical address space\n", (long long)start);
572                 /*
573                  * To ensure bank->start + bank->size is representable in
574                  * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
575                  * This means we lose a page after masking.
576                  */
577                 size = ULONG_MAX - aligned_start;
578         }
579 #endif
580
581         if (aligned_start < PHYS_OFFSET) {
582                 if (aligned_start + size <= PHYS_OFFSET) {
583                         pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
584                                 aligned_start, aligned_start + size);
585                         return -EINVAL;
586                 }
587
588                 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
589                         aligned_start, (u64)PHYS_OFFSET);
590
591                 size -= PHYS_OFFSET - aligned_start;
592                 aligned_start = PHYS_OFFSET;
593         }
594
595         bank->start = aligned_start;
596         bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
597
598         /*
599          * Check whether this memory region has non-zero size or
600          * invalid node number.
601          */
602         if (bank->size == 0)
603                 return -EINVAL;
604
605         meminfo.nr_banks++;
606         return 0;
607 }
608
609 /*
610  * Pick out the memory size.  We look for mem=size@start,
611  * where start and size are "size[KkMm]"
612  */
613 static int __init early_mem(char *p)
614 {
615         static int usermem __initdata = 0;
616         phys_addr_t size;
617         phys_addr_t start;
618         char *endp;
619
620         /*
621          * If the user specifies memory size, we
622          * blow away any automatically generated
623          * size.
624          */
625         if (usermem == 0) {
626                 usermem = 1;
627                 meminfo.nr_banks = 0;
628         }
629
630         start = PHYS_OFFSET;
631         size  = memparse(p, &endp);
632         if (*endp == '@')
633                 start = memparse(endp + 1, NULL);
634
635         arm_add_memory(start, size);
636
637         return 0;
638 }
639 early_param("mem", early_mem);
640
/*
 * request_standard_resources - publish memory and I/O port regions.
 *
 * Registers a "System RAM" resource for every memblock memory region,
 * nests the kernel text/data resources inside the region containing
 * them, and claims the optional video RAM and legacy parallel-port
 * ranges requested by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image resources inside their RAM bank */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
685
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/*
 * Default console geometry handed to the VGA/dummy console drivers:
 * 80x30 character cells, VGA text mode, 8-pixel-high glyphs.
 */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
696
/*
 * customize_machine - run the board's device-registration hook.
 *
 * customizes platform devices, or adds new ones
 * On DT based machines, we fall back to populating the
 * machine from the device tree, if no callback is provided,
 * otherwise we would always need an init_machine callback.
 */
static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
715
716 static int __init init_machine_late(void)
717 {
718         if (machine_desc->init_late)
719                 machine_desc->init_late();
720         return 0;
721 }
722 late_initcall(init_machine_late);
723
724 #ifdef CONFIG_KEXEC
725 static inline unsigned long long get_total_mem(void)
726 {
727         unsigned long total;
728
729         total = max_low_pfn - min_low_pfn;
730         return total << PAGE_SHIFT;
731 }
732
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* nothing to do when no (valid) crashkernel= option was given */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* publish the reservation in the iomem resource tree */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
769 #else
770 static inline void reserve_crashkernel(void) {}
771 #endif /* CONFIG_KEXEC */
772
773 static int __init meminfo_cmp(const void *_a, const void *_b)
774 {
775         const struct membank *a = _a, *b = _b;
776         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
777         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
778 }
779
/*
 * hyp_mode_check - report which mode the CPU(s) entered the kernel in.
 *
 * With CONFIG_ARM_VIRT_EXT, logs whether the CPUs started in HYP mode
 * (virtualization extensions usable), in SVC mode, or in inconsistent
 * modes — the latter usually indicating bootloader/firmware problems.
 * Compiles to a no-op without CONFIG_ARM_VIRT_EXT.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
794
/*
 * setup_arch - ARM top-level boot-time initialisation.
 *
 * Called from start_kernel().  Identifies the CPU and the machine
 * (device tree first, ATAGS as fallback), parses early parameters,
 * establishes the memory layout and page tables, registers resources
 * and selects the SMP/PSCI operations, handing the command line back
 * through *cmdline_p.  The ordering of these steps matters.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* prefer a flattened device tree; fall back to ATAGS boot data */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* describe the kernel image layout to the core VM */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* banks may have been registered out of order (e.g. by mem=) */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		/*
		 * The machine's smp_init() hook may install its own SMP
		 * ops; otherwise prefer PSCI, then the static mdesc->smp.
		 */
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
	}
#endif

	/*
	 * NOTE(review): on SMP the HYP-mode check presumably runs once
	 * the secondary CPUs are up (elsewhere); only UP checks here.
	 */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
868
869
870 static int __init topology_init(void)
871 {
872         int cpu;
873
874         for_each_possible_cpu(cpu) {
875                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
876                 cpuinfo->cpu.hotpluggable = 1;
877                 register_cpu(&cpuinfo->cpu, cpu);
878         }
879
880         return 0;
881 }
882 subsys_initcall(topology_init);
883
884 #ifdef CONFIG_HAVE_PROC_CPU
885 static int __init proc_cpu_init(void)
886 {
887         struct proc_dir_entry *res;
888
889         res = proc_mkdir("cpu", NULL);
890         if (!res)
891                 return -ENOMEM;
892         return 0;
893 }
894 fs_initcall(proc_cpu_init);
895 #endif
896
/*
 * Feature names printed on the /proc/cpuinfo "Features" line.  Entry j
 * corresponds to bit j of elf_hwcap (see c_show()); the list is
 * NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};
922
/*
 * c_show - generate the body of /proc/cpuinfo.
 *
 * Prints one stanza per online CPU (model name, BogoMIPS, hwcap
 * feature strings and decoded MIDR fields), followed by the
 * machine-wide Hardware/Revision/Serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		/* per-cpu loops_per_jiffy rendered as a decimal BogoMIPS */
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		/* hwcap_str is ordered by elf_hwcap bit number */
		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* decode the variant/part fields per MIDR layout era */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
985
986 static void *c_start(struct seq_file *m, loff_t *pos)
987 {
988         return *pos < 1 ? (void *)1 : NULL;
989 }
990
991 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
992 {
993         ++*pos;
994         return NULL;
995 }
996
/* seq_file stop: nothing was acquired in c_start, so nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1000
/* seq_file operations producing /proc/cpuinfo (see c_show()) */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};