#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/perf_counter.h>
#include <asm/cpumask.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#endif
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#ifdef CONFIG_X86_64
/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_callin_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_initialized_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

#else /* CONFIG_X86_32 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_initialized;
cpumask_t cpu_sibling_setup_map;
#endif /* CONFIG_X86_32 */
static struct cpu_dev *this_cpu __cpuinitdata;
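/*
 * Note on the descriptor initializers below: each GDT entry is written as
 * the two raw 32-bit words of the descriptor.  For example 0x0000ffff,
 * 0x00cf9a00 encodes base 0, a 4 GB limit (limit 0xfffff with 4 KB
 * granularity), present, DPL 0, execute/read code, 32-bit default size;
 * the 0x00af.. kernel code variant sets the L bit instead of D/B, i.e. a
 * 64-bit (long mode) code segment.
 */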
#ifdef CONFIG_X86_64
/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */
/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
#else
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
#endif
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl\n\t"
		      : "=&r" (f1), "=&r" (f2)

	return ((f1^f2) & flag) != 0;
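/*
 * Note: X86_EFLAGS_ID is bit 21 of EFLAGS.  The CPUID instruction is
 * available iff software can toggle that bit, which is exactly what
 * flag_is_changeable_p() is used to test in have_cpuid_p() below.
 */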
/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
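/*
 * Note: booting with "serialnumber" on the kernel command line clears
 * disable_x86_serial_nr above, so squash_the_stupid_serial_number()
 * leaves the processor serial number (PSN) feature enabled.
 */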
static inline int flag_is_changeable_p(u32 flag)

/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)

static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
 * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
 */
/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];

	return NULL;		/* Not found */
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
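/*
 * Note: the %fs reload above is what makes the new GDT take effect for
 * the per-cpu segment; a segment register caches its descriptor when it
 * is loaded, so load_gdt() alone would leave the old __KERNEL_PERCPU
 * descriptor in use.
 */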
static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
	display_cacheinfo(c);
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};
static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
	if (c->extended_cpuid_level < 0x80000004)

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];

	while (q <= &c->x86_model_id[48])
		*q++ = '\0';	/* Zero-pad the rest */
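/*
 * Note: CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * processor brand string in EAX..EDX, 48 bytes in total, which is why
 * x86_model_id is filled as twelve 32-bit words and terminated at
 * byte 48 above.
 */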
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */

	if (n < 0x80000006)	/* Some chips just have a large L1. */

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);

	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
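/*
 * Note on the leaves used above: CPUID 0x80000005 reports L1 cache/TLB
 * details (EDX = L1 instruction cache, ECX = L1 data cache, with the size
 * in KB in bits 31:24 and the line size in bits 7:0), while 0x80000006
 * reports the L2 cache in ECX and the L2 TLB entry counts in EBX.
 */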
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	if (!cpu_has(c, X86_FEATURE_HT))

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > nr_cpu_ids) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			smp_num_siblings = 1;

		index_msb = get_count_order(smp_num_siblings);

		c->phys_proc_id = phys_pkg_id(index_msb);
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					     ((1 << core_bits) - 1);
		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					     ((1 << core_bits) - 1);

	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
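/*
 * Note on the topology math above: CPUID(1).EBX[23:16] gives the number
 * of logical processors per physical package.  get_count_order() returns
 * the number of APIC ID bits spanned by that count, so phys_proc_id is
 * taken from the APIC ID bits above the sibling field and cpu_core_id
 * from the bits between the thread and core fields.
 */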
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
	char *v = c->x86_vendor_id;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;

	printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
	printk(KERN_ERR "CPU: Your system may be unstable.\n");

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
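/*
 * Note on the decoding above: CPUID(1).EAX packs the stepping in bits
 * 3:0, model in 7:4, family in 11:8, extended model in 19:16 and extended
 * family in 27:20; the extended fields only contribute when the base
 * family is 0xf (and, for the model, family 6 as well).  CPUID(1).EBX[15:8]
 * is the CLFLUSH line size in 8-byte units, valid when the CLFLUSH feature
 * bit (EDX bit 19, tested via cap0 here) is set.
 */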
static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);
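/*
 * Note: CPUID 0x80000008 EAX[7:0] is the number of physical address bits
 * and EAX[15:8] the number of linear (virtual) address bits; CPUID
 * 0x80000007 EDX advertises power management features such as the
 * invariant TSC.
 */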
static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
	c->x86_clflush_size = 64;
	c->x86_clflush_size = 32;
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);

	c->cpu_index = boot_cpu_id;
void __init early_cpu_init(void)
	struct cpu_dev **cdev;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
		cpu_devs[count] = cpudev;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
			printk("  %s %s\n", cpudev->c_vendor,

	early_identify_cpu(&boot_cpu_data);
/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect. In the latter case it doesn't even *fail*
 * reliably, so probing for it doesn't even work. Disable it completely
 * unless we can find a reliable way to detect all the broken cases.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	clear_cpu_cap(c, X86_FEATURE_NOPL);
}
static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
	c->extended_cpuid_level = 0;

	identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
# ifdef CONFIG_X86_HT
		c->apicid = phys_pkg_id(c->initial_apicid, 0);
		c->apicid = c->initial_apicid;

		c->phys_proc_id = c->initial_apicid;

	get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->x86_clflush_size = 64;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	c->apicid = phys_pkg_id(0);
	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);
	/*
	 * The vendor-specific functions might have changed features. Now
	 * we do "generic changes."
	 */

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}
	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];
#ifdef CONFIG_X86_MCE
	/* Init Machine Check Exception if available. */

	select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	numa_add_cpu(smp_processor_id());
static void vgetcpu_set_mode(void)
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
		vgetcpu_mode = VGETCPU_LSL;

void __init identify_boot_cpu(void)
	identify_cpu(&boot_cpu_data);
	init_hw_perf_counters();
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
	BUG_ON(c == &boot_cpu_data);
static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};
static void __cpuinit print_cpu_msr(void)
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
	get_option(&arg, &num);

__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
		printk(KERN_CONT "\n");

	if (c->cpu_index < show_msr)
static __init int setup_disablecpuid(char *arg)
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);

__setup("clearcpuid=", setup_disablecpuid);
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

DEFINE_PER_CPU_PAGE_ALIGNED(char[IRQ_STACK_SIZE], irq_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr);	/* will be set during per cpu init */

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(unsigned int, irq_count) = -1;
void __cpuinit pda_init(int cpu)
	/* Set up data that may be needed in __get_free_pages early */

	load_pda_offset(cpu);

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
	__aligned(PAGE_SIZE);
extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/SS but only for a 32-bit target; LSTAR sets the
	 * 64-bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
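	/*
	 * Note on MSR_STAR above: bits 47:32 hold the SYSCALL target
	 * selector (__KERNEL_CS, with SS taken as that value + 8) and
	 * bits 63:48 the SYSRET base selector (__USER32_CS), from which
	 * the CPU derives the user CS and SS at fixed offsets.
	 */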
unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit.
 */
void __cpuinit cpu_init(void)
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	struct task_struct *me;

	/* CPU 0 is initialised in head64.c */

	if (cpu != 0 && percpu_read(node_number) == 0 &&
	    cpu_to_node(cpu) != NUMA_NO_NODE)
		percpu_write(node_number, cpu_to_node(cpu));

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	if (cpu != 0 && x2apic)

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!orig_ist->ist[0]) {
		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
		  [DEBUG_STACK - 1] = DEBUG_STKSZ
		};
		char *estacks = per_cpu(exception_stacks, cpu);
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += sizes[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;
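	/*
	 * Note: a set bit in the TSS I/O permission bitmap means access to
	 * the corresponding port is denied, so filling it with ~0UL traps
	 * all user-space port I/O until ioperm()/iopl() grants access.
	 */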
	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;

	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);

	load_LDT(&init_mm.context);

	/*
	 * If the kgdb is connected no debug regs should be altered. This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();

	/*
	 * Clear all 6 debug registers:
	 */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);

	/* If the kgdb is connected no debug regs should be altered. */

	raw_local_save_flags(kernel_eflags);
void __cpuinit cpu_init(void)
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();
	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;

	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);

	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */

	/*
	 * Force FPU initialization:
	 */
		current_thread_info()->status = TS_XSAVE;
		current_thread_info()->status = 0;
	mxcsr_feature_mask_init();

	/*
	 * Boot processor to set up the FP and extended state context info.
	 */
	if (smp_processor_id() == boot_cpu_id)
		init_thread_xstate();