#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <linux/topology.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_LIMIT_CPUID) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_FAST_STRING) > 0)
		pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */
int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/* Mask B, Pentium, but not Pentium MMX */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/* Remember we have B step Pentia with bugs */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_BIT_PRF_DIS) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
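	/* EAX[31:26] of leaf 4 reports the maximum number of addressable core IDs per package, minus one. */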
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
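	/* In the VMX capability MSRs, the low dword gives the allowed 0-settings and the high dword the allowed 1-settings of the controls. */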
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && cpu_has_clflush &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;
		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;
		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
	 * x86_energy_perf_policy(8) is available to change it at run-time
	 */
	if (cpu_has(c, X86_FEATURE_EPB)) {
		u64 epb;

		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
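		/* The hint lives in bits 3:0 of the MSR: 0 prefers performance, 15 maximum energy saving. */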
		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
				" Set to 'normal', was 'performance'\n"
				"ENERGY_PERF_BIAS: View and update with"
				" x86_energy_perf_policy(8)\n");
			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		}
	}
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03
#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14
#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc && \
			intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
	switch ((c->x86 << 8) + c->x86_model) {
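	/* Family in the high byte, model in the low byte: 0x63a is family 6, model 0x3a. */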
	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
		tlb_flushall_shift = -1;
		break;
	case 0x63a: /* Ivybridge */
		tlb_flushall_shift = 2;
		break;
	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
	case 0x625: /* 32 nm nehalem, "Clarkdale" */
	case 0x62c: /* 32 nm nehalem, "Gulftown" */
	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
	case 0x62f: /* 32 nm Xeon E7 */
	case 0x62a: /* SandyBridge */
	case 0x62d: /* SandyBridge, "Romley-EP" */
	default:
		tlb_flushall_shift = 6;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
	intel_tlb_flushall_shift_set(c);
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);