/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"
/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This
 * allows the CPU to raise an interrupt when a corrected machine check
 * happens. Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(1 * HZ)
#define CMCI_STORM_THRESHOLD	15
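/*
 * Tuning summary, derived from the code below: CMCI_THRESHOLD is the
 * per-bank error count programmed into MSR_IA32_MCx_CTL2 before an
 * interrupt is raised. More than CMCI_STORM_THRESHOLD interrupts within
 * one CMCI_STORM_INTERVAL are treated as a storm, and the CPU falls
 * back to polling every CMCI_POLL_INTERVAL until things calm down.
 */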
static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;
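/*
 * Per-CPU storm state machine:
 *
 *   NONE     -> ACTIVE   : cmci_storm_detect() sees a storm
 *   ACTIVE   -> SUBSIDED : the poll timer went quiet (mce_intel_adjust_timer)
 *   SUBSIDED -> NONE     : cmci_storm_on_cpus dropped to zero
 *
 * cmci_storm_on_cpus counts the CPUs currently in ACTIVE state; CMCI is
 * only re-enabled once the last of them has left ACTIVE.
 */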
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mce_cmci_disabled || mce_ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initial
	 * MSR setup is vendor keyed and this
	 * makes sure none of the backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}
void mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
}
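/*
 * Fold a dead CPU's storm accounting back into the global count so the
 * remaining CPUs are not stuck waiting for it to subside.
 */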
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}
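/*
 * Called from the generic MCE poll timer. Decides, based on this CPU's
 * storm state, whether to keep the fast poll interval or to hand
 * control back to interrupt mode.
 */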
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
	int r;

	if (interval < CMCI_POLL_INTERVAL)
		return interval;

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the
		 * timer interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		r = atomic_sub_return(1, &cmci_storm_on_cpus);
		if (r == 0)
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all cpus to go back to SUBSIDED
		 * state. When that happens we switch back to
		 * interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/*
		 * We have shiny weather. Let the poll do whatever it
		 * thinks.
		 */
		return interval;
	}
}
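/*
 * Crude event rate estimation: count interrupts within the current
 * CMCI_STORM_INTERVAL window; once the count exceeds
 * CMCI_STORM_THRESHOLD, disable CMCI on this CPU's banks and let the
 * kicked poll timer take over.
 */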
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_clear();
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_POLL_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}
/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_irq();
}
/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}

		if (!mce_bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If bios_cmci_threshold boot option was specified
			 * but the threshold is zero, we'll try to initialize
			 * it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			/*
			 * We are able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mce_bios_cmci_threshold && bios_zero_thresh &&
			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mce_bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}
/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}
/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;
	u64 val;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		val &= ~MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
/*
 * After a CPU went down, cycle through all the other CPUs and
 * rediscover the CMCI banks it owned. Must run in process context.
 */
void cmci_rediscover(int dying)
{
	int banks;
	int cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same */
		if (cmci_supported(&banks))
			cmci_discover(banks);
	}

	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}
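/*
 * Note on the design above: MSR_IA32_MCx_CTL2 is a per-CPU resource, so
 * discovery must execute on each target CPU. Temporarily rebinding the
 * current task with set_cpus_allowed_ptr() is the simplest way to get
 * there from process context.
 */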
/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with still disabled APIC, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}
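/*
 * Per-CPU entry point for Intel specific MCE feature setup: thermal
 * monitoring is initialized alongside CMCI.
 */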
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}