/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
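
/*
 * Endianness probe: the union overlays the bytes 'l', '?', '?', 'b'
 * on an unsigned long.  Casting the long to char keeps only its least
 * significant byte, which is the byte at the lowest address ('l') on a
 * little-endian CPU and the byte at the highest address ('b') on a
 * big-endian one.  setup_processor() appends the result to the utsname
 * machine and ELF platform strings, e.g. "armv7l".
 */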

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
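
/*
 * Worked example: a Cortex-A9 reports MIDR 0x410fc090; bits [19:16]
 * are 0xf, so the revised-CPUID branch reads MMFR0, whose VMSAv7
 * support field selects CPU_ARCH_ARMv7.  An ARM926EJ-S (MIDR
 * 0x41069265) instead hits the bits-[18:16] path: the architecture
 * field value 6 added to CPU_ARCH_ARMv3 gives CPU_ARCH_ARMv5TEJ.
 */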

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

#ifdef CONFIG_BIG_LITTLE
        /*
         * We expect a combination of Cortex-A15 and Cortex-A7 cores.
         * A7 = VIPT aliasing I-cache
         * A15 = PIPT (non-aliasing) I-cache
         * To cater for this discrepancy, let's assume aliasing I-cache
         * all the time.  This means unneeded extra work on the A15 but
         * only ptrace is affected which is not performance critical.
         */
        if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
                return 1;
#endif

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
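
/*
 * For the ARMv7 path above: CSSELR = 1 selects the L1 instruction
 * cache, which CCSIDR then describes.  line_size * num_sets is the
 * size of one cache way; the I-cache can only alias when that exceeds
 * PAGE_SIZE, since only then do virtual address bits above the page
 * offset take part in set indexing.  E.g. a 32KB two-way VIPT I-cache
 * with 32-byte lines has 512 sets, so one way spans 16KB > 4KB and
 * the cache aliases.
 */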

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
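
/*
 * The printk above is the early "CPU: ... cache" boot line; on a
 * Cortex-A7, for example, it reads:
 *   CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
 */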

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        unsigned int divide_instrs;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        /* ID_ISAR0[27:24] describes the hardware divide support */
        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

        switch (divide_instrs) {
        case 2:
                elf_hwcap |= HWCAP_IDIVA;
                /* fall through: ARM-mode SDIV/UDIV implies the Thumb forms */
        case 1:
                elf_hwcap |= HWCAP_IDIVT;
        }
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}
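
/*
 * The asm block above visits IRQ, abort and undefined mode in turn
 * (with IRQs and FIQs masked), points each mode's banked sp at the
 * matching three-word slot of this CPU's struct stack, then returns to
 * SVC mode.  Three words per mode suffice because the vector stubs
 * only park r0, the exception lr and the spsr there before continuing
 * on the SVC stack.
 */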

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
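
/*
 * Example: if the kernel was entered on the core with MPIDR affinity
 * level 0 equal to 2, the loop above yields the map
 * logical 0 -> phys 2, logical 2 -> phys 0, all others identity,
 * so the booting core is always logical CPU 0.
 */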

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - bank->start;
        }
#endif

        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Reject this bank if, after the rounding above, it no longer
         * covers a whole page.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}
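
/*
 * Rounding example: arm_add_memory(0x80000800, 0x100000) first trims
 * the size by the 0x800 page offset, then registers a bank with
 * start = 0x80001000 and size = 0xff000, so only whole pages are
 * ever added to meminfo.
 */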

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
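
/*
 * Usage: "mem=64M" describes 64MB starting at PHYS_OFFSET, while
 * "mem=64M@0x80000000" places the bank explicitly.  The first mem=
 * option discards the bootloader-supplied banks; each further mem=
 * appends another bank via arm_add_memory().
 */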

static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never have lp0, lp1 or lp2, so only claim
         * those port ranges when the machine record asks for them.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}
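
/*
 * The resulting hierarchy shows up in /proc/iomem, for example:
 *   80000000-9fffffff : System RAM
 *     80008000-805fffff : Kernel code
 *     80600000-8065ffff : Kernel data
 * (addresses are illustrative and depend on the platform).
 */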

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * customizes platform devices, or adds new ones
         * On DT based machines, we fall back to populating the
         * machine from the device tree, if no callback is provided,
         * otherwise we would always need an init_machine callback.
         */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}
/**
 * reserve_crashkernel() - reserve memory for a crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter (e.g. "crashkernel=64M@32M").  The
 * reserved memory is used by a dump-capture kernel when the primary
 * kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);
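
/*
 * Registering each possible CPU creates /sys/devices/system/cpu/cpuN;
 * marking them hotpluggable additionally exposes the per-CPU "online"
 * control file used to take cores down and bring them back up.
 */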

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};
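
/*
 * The position of each string above must match the HWCAP_* bit it
 * names: c_show() prints entry j when bit j is set in elf_hwcap,
 * e.g. bit 0 -> "swp", bit 12 -> "neon".
 */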

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};