#define default_MCA_trigger(idx) (1)
#define default_MCA_polarity(idx) (0)
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
{
int apic1, pin1, apic2, pin2;
int vector;
+ unsigned int ver;
+
+ ver = apic_read(APIC_LVR);
+ ver = GET_APIC_VERSION(ver);
/*
* Subtle, code in timer_interrupt() expects an AEOI
* mode for the 8259A whenever interrupts are routed
* through I/O APICs. Also IRQ0 has to be enabled in
* the 8259A which implies the virtual wire has to be
- * disabled in the local APIC.
+ * disabled in the local APIC. Finally timer interrupts
+ * need to be acknowledged manually in the 8259A for
+ * timer_interrupt() and for the i82489DX when using
+ * the NMI watchdog.
*/
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
- timer_ack = 1;
+ timer_ack = !cpu_has_tsc;
+ timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
if (timer_over_8254 > 0)
enable_8259A_irq(0);
return 0;
}
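For reference, the timer_ack flag set above is consumed on the timer tick path. A sketch of how timer_interrupt() might use it, reconstructed from the i386 time code of this era (the OCW3 poll is the standard 8259A ack sequence; exact locking and constants should be treated as assumptions):

	if (timer_ack) {
		/*
		 * Ack the timer IRQ manually to reset the IRR bit; this
		 * also deasserts the NMI line on i82489DX-based systems
		 * when the NMI watchdog is in use.
		 */
		spin_lock(&i8259A_lock);
		outb(0x0C, PIC_MASTER_OCW3);
		/* Ack the IRQ; AEOI will end it automatically. */
		inb(PIC_MASTER_POLL);
		spin_unlock(&i8259A_lock);
	}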
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
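A usage sketch for the new helper; the caller and the IRQ number are hypothetical. Per the MP table conventions behind irq_trigger()/irq_polarity(), 0 means edge/active-high and 1 means level/active-low:

	/* Hypothetical caller: look up how the MP tables route ISA IRQ 9. */
	int trigger, polarity;

	if (acpi_get_override_irq(9, &trigger, &polarity) == 0)
		printk(KERN_INFO "IRQ 9 is %s triggered, active %s\n",
		       trigger ? "level" : "edge",
		       polarity ? "low" : "high");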
static int __init parse_disable_timer_pin_1(char *arg)
return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
+static void acpi_safe_halt(void)
+{
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we
+ * test NEED_RESCHED:
+ */
+ smp_mb();
+ if (!need_resched())
+ safe_halt();
+ current_thread_info()->status |= TS_POLLING;
+}
+
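The barrier comment guards against a lost wakeup. A simplified, hypothetical waker-side sketch (not the actual scheduler code) shows the pairing:

	/*
	 * Hypothetical waker: a polling idle CPU watches need_resched()
	 * itself, so no IPI is sent to it. If acpi_safe_halt() tested
	 * need_resched() while TS_POLLING was still visible as set, this
	 * wakeup could be missed and the CPU would halt anyway.
	 */
	static void kick_idle_cpu(struct task_struct *idle)
	{
		set_tsk_need_resched(idle);
		smp_mb();	/* pairs with the smp_mb() in acpi_safe_halt() */
		if (!(task_thread_info(idle)->status & TS_POLLING))
			smp_send_reschedule(task_cpu(idle));
	}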
#ifndef CONFIG_CPU_IDLE
static void
return;
}
-static void acpi_safe_halt(void)
-{
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- if (!need_resched())
- safe_halt();
- current_thread_info()->status |= TS_POLLING;
-}
-
static DEFINE_SPINLOCK(c3_lock);
static int c3_cpu_count;	/* CPUs currently in C3, protected by c3_lock */
/* Common C-state entry for C2, C3, .. */
if (pr->flags.bm_check)
acpi_idle_update_bm_rld(pr, cx);
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
- if (!need_resched())
- safe_halt();
- current_thread_info()->status |= TS_POLLING;
+ acpi_safe_halt();
cx->usage++;
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
u32 t1, t2;
+ int sleep_ticks = 0;
+
pr = processors[smp_processor_id()];
if (unlikely(!pr))
ACPI_FLUSH_CPU_CACHE();
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ /* Tell the scheduler that we are going deep-idle: */
+ sched_clock_idle_sleep_event();
acpi_state_timer_broadcast(pr, cx, 1);
acpi_idle_do_entry(cx);
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* TSC could halt in idle, so notify users */
mark_tsc_unstable("TSC halts in idle");
#endif
+ sleep_ticks = ticks_elapsed(t1, t2);
+
+ /* Tell the scheduler how much we idled: */
+ sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
cx->usage++;
acpi_state_timer_broadcast(pr, cx, 0);
- cx->time += ticks_elapsed(t1, t2);
+ cx->time += sleep_ticks;
return ticks_elapsed_in_us(t1, t2);
}
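sleep_ticks comes from ticks_elapsed(), which this hunk does not show; a sketch of its likely shape, inferred from the 24/32-bit PM-timer wraparound handling visible in ticks_elapsed_in_us() above:

	static u32 ticks_elapsed(u32 t1, u32 t2)
	{
		if (t2 >= t1)
			return t2 - t1;
		/* 24-bit PM timer: unfold the wrap within the 24-bit range */
		else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
			return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
		/* 32-bit PM timer */
		else
			return (0xFFFFFFFF - t1) + t2;
	}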
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
u32 t1, t2;
+ int sleep_ticks = 0;
+
pr = processors[smp_processor_id()];
if (unlikely(!pr))
if (acpi_idle_suspend)
return acpi_idle_enter_c1(dev, state);
+ if (acpi_idle_bm_check()) {
+ if (dev->safe_state) {
+ return dev->safe_state->enter(dev, dev->safe_state);
+ } else {
+ acpi_safe_halt();
+ return 0;
+ }
+ }
+
local_irq_disable();
current_thread_info()->status &= ~TS_POLLING;
/*
return 0;
}
+ /* Tell the scheduler that we are going deep-idle: */
+ sched_clock_idle_sleep_event();
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
acpi_state_timer_broadcast(pr, cx, 1);
- if (acpi_idle_bm_check()) {
- cx = pr->power.bm_state;
-
- acpi_idle_update_bm_rld(pr, cx);
-
- t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- acpi_idle_do_entry(cx);
- t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- } else {
- acpi_idle_update_bm_rld(pr, cx);
+ acpi_idle_update_bm_rld(pr, cx);
+ /*
+ * disable bus master
+ * bm_check implies we need ARB_DIS
+ * !bm_check implies we need cache flush
+ * bm_control determines whether we can do ARB_DIS
+ *
+ * That leaves a case where bm_check is set and bm_control is
+ * not set. In that case we cannot do much; we enter C3
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
spin_unlock(&c3_lock);
+ } else if (!pr->flags.bm_check) {
+ ACPI_FLUSH_CPU_CACHE();
+ }
- t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- acpi_idle_do_entry(cx);
- t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ /* Re-enable bus master arbitration */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
spin_lock(&c3_lock);
- /* Re-enable bus master arbitration */
- if (c3_cpu_count == num_online_cpus())
- acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
spin_unlock(&c3_lock);
}
/* TSC could halt in idle, so notify users */
mark_tsc_unstable("TSC halts in idle");
#endif
+ sleep_ticks = ticks_elapsed(t1, t2);
+ /* Tell the scheduler how much we idled: */
+ sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
cx->usage++;
acpi_state_timer_broadcast(pr, cx, 0);
- cx->time += ticks_elapsed(t1, t2);
+ cx->time += sleep_ticks;
return ticks_elapsed_in_us(t1, t2);
}
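acpi_idle_bm_check(), consulted at the top of this function, is also outside the hunk. A minimal sketch, assuming the usual read-and-clear of BM_STS with the ACPICA register API of this era (the real version may add chipset-erratum workarounds):

	/* Sketch: nonzero if bus-master activity occurred since the last
	 * check. BM_STS is write-one-to-clear, so reset it after reading. */
	static int acpi_idle_bm_check(void)
	{
		u32 bm_status = 0;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		return bm_status;
	}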
case ACPI_STATE_C1:
state->flags |= CPUIDLE_FLAG_SHALLOW;
state->enter = acpi_idle_enter_c1;
+ dev->safe_state = state;
break;
case ACPI_STATE_C2:
state->flags |= CPUIDLE_FLAG_BALANCED;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
+ dev->safe_state = state;
break;
case ACPI_STATE_C3:
if (!count)
return -EINVAL;
- /* find the deepest state that can handle active BM */
- if (pr->flags.bm_check) {
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
- if (pr->power.states[i].type == ACPI_STATE_C3)
- break;
- pr->power.bm_state = &pr->power.states[i-1];
- }
-
return 0;
}
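The removed bm_state scan is superseded by the dev->safe_state assignments made during state registration above: since states are set up shallow to deep, safe_state ends up pointing at the deepest C1/C2 state, which is the same "deepest state that can handle active BM" the old loop computed. An illustrative trace, assuming a CPU exposing C1..C3:

	/*
	 * C1 registered -> dev->safe_state = C1
	 * C2 registered -> dev->safe_state = C2   (deepest BM-safe state)
	 * C3 registered -> safe_state untouched; acpi_idle_enter_bm()
	 *                  falls back to it when bus masters are active.
	 */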
if (!first_run) {
dmi_check_system(processor_power_dmi_table);
+ max_cstate = acpi_processor_cstate_check(max_cstate);
if (max_cstate < ACPI_C_STATES_MAX)
printk(KERN_NOTICE
"ACPI: processor limited to max C-state %d\n",