arch/x86/kernel/setup_percpu.c (firefly-linux-kernel-4.4.55.git)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

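/*
 * Until setup_per_cpu_areas() has copied the init section below, the
 * boot CPU works out of the initial per-cpu data linked into the
 * kernel image.  On 64-bit, per-cpu symbols are zero-based, so the
 * boot-time offset must point at that init section (__per_cpu_load);
 * on 32-bit the symbols keep their linked addresses, so an offset of
 * 0 already reaches the initial copy.
 */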
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

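/*
 * On 32-bit, per-cpu variables are addressed through a segment
 * register, so each CPU gets a GDT entry (GDT_ENTRY_PERCPU) whose
 * base is that CPU's per-cpu offset.  On 64-bit the offset lives in
 * the GS base MSR instead, so no GDT entry is needed here.
 */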
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size;
        char *ptr;
        int cpu;

        /* Copy section for each CPU (we discard the original) */
        size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = alloc_bootmem_pages(size);
#else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = alloc_bootmem_pages(size);
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
                        ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                cpu, node, __pa(ptr));
                }
#endif

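                /*
                 * Seed this CPU's area with the initial per-cpu data and
                 * record its offset: per_cpu_offset(cpu) is the delta that
                 * per_cpu() adds to a per-cpu symbol's address to reach
                 * this CPU's copy.
                 */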
                memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);

                DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}