Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Mar 2012 23:45:38 +0000 (16:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Mar 2012 23:45:39 +0000 (16:45 -0700)
Pull ACPI & Power Management changes from Len Brown:
 - ACPI 5.0 after-ripples, ACPICA/Linux divergence cleanup
 - cpuidle evolving, more ARM use
 - thermal sub-system evolving, ditto
 - assorted other PM bits

Fix up conflicts in various cpuidle implementations due to ARM cpuidle
cleanups (ARM at91 self-refresh and cpu idle code rewritten into
"standby" in asm conflicting with the consolidation of cpuidle time
keeping), trivial SH include file context conflict and RCU tracing fixes
in generic code.
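
For reference, a minimal sketch of the consolidated pattern the conflicting ARM
cpuidle drivers were rewritten toward, condensed from the at91 and shmobile hunks
below (the example_* names are placeholders, not code from this merge): the driver
sets en_core_tk_irqen so the cpuidle core does the residency timekeeping and IRQ
re-enabling, and reuses the common ARM_CPUIDLE_WFI_STATE instead of open-coding
the WFI state and per-driver gettimeofday/ktime bookkeeping.

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/cpuidle.h>
    #include <asm/cpuidle.h>
    #include <asm/proc-fns.h>

    /* Deep-idle entry; with en_core_tk_irqen set, the core measures the
     * residency and re-enables interrupts, so none of that is done here. */
    static int example_enter_idle(struct cpuidle_device *dev,
                                  struct cpuidle_driver *drv, int index)
    {
            /* a real driver would put RAM into self-refresh around this,
             * as the at91 hunk below does */
            cpu_do_idle();
            return index;
    }

    static struct cpuidle_driver example_idle_driver = {
            .name                   = "example_idle",
            .owner                  = THIS_MODULE,
            .en_core_tk_irqen       = 1,
            .states[0]              = ARM_CPUIDLE_WFI_STATE,
            .states[1]              = {
                    .enter                  = example_enter_idle,
                    .exit_latency           = 10,           /* us */
                    .target_residency       = 100000,       /* us */
                    .flags                  = CPUIDLE_FLAG_TIME_VALID,
                    .name                   = "EXAMPLE_SR",
                    .desc                   = "WFI and RAM self refresh",
            },
            .state_count            = 2,
    };

    /* a per-CPU cpuidle_device still has to be registered separately,
     * as the at91 init code below does */
    static int __init example_init_cpuidle(void)
    {
            return cpuidle_register_driver(&example_idle_driver);
    }
    device_initcall(example_init_cpuidle);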

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux: (77 commits)
  ACPI throttling: fix endian bug in acpi_read_throttling_status()
  Disable MCP limit exceeded messages from Intel IPS driver
  ACPI video: Don't start video device until its associated input device has been allocated
  ACPI video: Harden video bus adding.
  ACPI: Add support for exposing BGRT data
  ACPI: export acpi_kobj
  ACPI: Fix logic for removing mappings in 'acpi_unmap'
  CPER failed to handle generic error records with multiple sections
  ACPI: Clean redundant codes in scan.c
  ACPI: Fix unprotected smp_processor_id() in acpi_processor_cst_has_changed()
  ACPI: consistently use should_use_kmap()
  PNPACPI: Fix device ref leaking in acpi_pnp_match
  ACPI: Fix use-after-free in acpi_map_lsapic
  ACPI: processor_driver: add missing kfree
  ACPI, APEI: Fix incorrect APEI register bit width check and usage
  Update documentation for parameter *notrigger* in einj.txt
  ACPI, APEI, EINJ, new parameter to control trigger action
  ACPI, APEI, EINJ, limit the range of einj_param
  ACPI, APEI, Fix ERST header length check
  cpuidle: power_usage should be declared signed integer
  ...

arch/arm/kernel/Makefile
arch/arm/mach-at91/cpuidle.c
arch/arm/mach-shmobile/cpuidle.c
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/smpboot.c
drivers/acpi/ec.c
drivers/acpi/processor_driver.c
drivers/cpuidle/cpuidle.c
include/linux/acpi.h

diff --combined arch/arm/kernel/Makefile
index 8269d892874232f59f6e49537dc6df49e8c0927b,940c27fde498f9277a7ac2460d651876bd1fdca0..7b787d642af4fe2ac3c9f81be97c8913ea8b2e95
@@@ -7,8 -7,6 +7,8 @@@ AFLAGS_head.o        := -DTEXT_OFFSET=$
  
  ifdef CONFIG_FUNCTION_TRACER
  CFLAGS_REMOVE_ftrace.o = -pg
 +CFLAGS_REMOVE_insn.o = -pg
 +CFLAGS_REMOVE_patch.o = -pg
  endif
  
  CFLAGS_REMOVE_return_address.o = -pg
  # Object file lists.
  
  obj-y         := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
 -                 process.o ptrace.o return_address.o setup.o signal.o \
 -                 sys_arm.o stacktrace.o time.o traps.o
 +                 process.o ptrace.o return_address.o sched_clock.o \
 +                 setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
  
  obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
  
  obj-$(CONFIG_LEDS)            += leds.o
  obj-$(CONFIG_OC_ETM)          += etm.o
+ obj-$(CONFIG_CPU_IDLE)                += cpuidle.o
  obj-$(CONFIG_ISA_DMA_API)     += dma.o
 -obj-$(CONFIG_ARCH_ACORN)      += ecard.o 
  obj-$(CONFIG_FIQ)             += fiq.o fiqasm.o
  obj-$(CONFIG_MODULES)         += armksyms.o module.o
  obj-$(CONFIG_ARTHUR)          += arthur.o
  obj-$(CONFIG_ISA_DMA)         += dma-isa.o
  obj-$(CONFIG_PCI)             += bios32.o isa.o
  obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
 -obj-$(CONFIG_HAVE_SCHED_CLOCK)        += sched_clock.o
  obj-$(CONFIG_SMP)             += smp.o smp_tlb.o
  obj-$(CONFIG_HAVE_ARM_SCU)    += smp_scu.o
  obj-$(CONFIG_HAVE_ARM_TWD)    += smp_twd.o
 -obj-$(CONFIG_DYNAMIC_FTRACE)  += ftrace.o
 -obj-$(CONFIG_FUNCTION_GRAPH_TRACER)   += ftrace.o
 +obj-$(CONFIG_DYNAMIC_FTRACE)  += ftrace.o insn.o
 +obj-$(CONFIG_FUNCTION_GRAPH_TRACER)   += ftrace.o insn.o
 +obj-$(CONFIG_JUMP_LABEL)      += jump_label.o insn.o patch.o
  obj-$(CONFIG_KEXEC)           += machine_kexec.o relocate_kernel.o
 -obj-$(CONFIG_KPROBES)         += kprobes.o kprobes-common.o
 +obj-$(CONFIG_KPROBES)         += kprobes.o kprobes-common.o patch.o
  ifdef CONFIG_THUMB2_KERNEL
  obj-$(CONFIG_KPROBES)         += kprobes-thumb.o
  else
@@@ -63,6 -62,9 +63,6 @@@ obj-$(CONFIG_SWP_EMULATE)     += swp_emulat
  CFLAGS_swp_emulate.o          := -Wa,-march=armv7-a
  obj-$(CONFIG_HAVE_HW_BREAKPOINT)      += hw_breakpoint.o
  
 -obj-$(CONFIG_CRUNCH)          += crunch.o crunch-bits.o
 -AFLAGS_crunch-bits.o          := -Wa,-mcpu=ep9312
 -
  obj-$(CONFIG_CPU_XSCALE)      += xscale-cp0.o
  obj-$(CONFIG_CPU_XSC3)                += xscale-cp0.o
  obj-$(CONFIG_CPU_MOHAWK)      += xscale-cp0.o
diff --combined arch/arm/mach-at91/cpuidle.c
index 555d956b3a574b3a4d96be46c199c087d991080d,d40b3f317f7f7058ff29b657033b3cc0d747dce9..ece1f9aefb47a0ff46a0fe0f0bfeedd4d2bac27f
  #include <linux/init.h>
  #include <linux/platform_device.h>
  #include <linux/cpuidle.h>
- #include <asm/proc-fns.h>
  #include <linux/io.h>
  #include <linux/export.h>
+ #include <asm/proc-fns.h>
+ #include <asm/cpuidle.h>
  
  #include "pm.h"
  
  
  static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
  
- static struct cpuidle_driver at91_idle_driver = {
-       .name =         "at91_idle",
-       .owner =        THIS_MODULE,
- };
  /* Actual code that puts the SoC in different idle states */
  static int at91_enter_idle(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                               int index)
  {
-       struct timeval before, after;
-       int idle_time;
 -      u32 saved_lpr;
--
-       local_irq_disable();
-       do_gettimeofday(&before);
-       if (index == 0)
-               /* Wait for interrupt state */
-               cpu_do_idle();
-       else if (index == 1)
-               at91_standby();
 -      __asm__("b 1f; .align 5; 1:\n"
 -      "       mcr p15, 0, r0, c7, c10, 4");   /* drain write buffer */
 -
 -      saved_lpr = sdram_selfrefresh_enable();
 -      cpu_do_idle();
 -      sdram_selfrefresh_disable(saved_lpr);
++      at91_standby();
  
-       do_gettimeofday(&after);
-       local_irq_enable();
-       idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
-                       (after.tv_usec - before.tv_usec);
-       dev->last_residency = idle_time;
        return index;
  }
  
+ static struct cpuidle_driver at91_idle_driver = {
+       .name                   = "at91_idle",
+       .owner                  = THIS_MODULE,
+       .en_core_tk_irqen       = 1,
+       .states[0]              = ARM_CPUIDLE_WFI_STATE,
+       .states[1]              = {
+               .enter                  = at91_enter_idle,
+               .exit_latency           = 10,
+               .target_residency       = 100000,
+               .flags                  = CPUIDLE_FLAG_TIME_VALID,
+               .name                   = "RAM_SR",
+               .desc                   = "WFI and DDR Self Refresh",
+       },
+       .state_count = AT91_MAX_STATES,
+ };
  /* Initialize CPU idle by registering the idle states */
  static int at91_init_cpuidle(void)
  {
        struct cpuidle_device *device;
-       struct cpuidle_driver *driver = &at91_idle_driver;
  
        device = &per_cpu(at91_cpuidle_device, smp_processor_id());
        device->state_count = AT91_MAX_STATES;
-       driver->state_count = AT91_MAX_STATES;
-       /* Wait for interrupt state */
-       driver->states[0].enter = at91_enter_idle;
-       driver->states[0].exit_latency = 1;
-       driver->states[0].target_residency = 10000;
-       driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-       strcpy(driver->states[0].name, "WFI");
-       strcpy(driver->states[0].desc, "Wait for interrupt");
-       /* Wait for interrupt and RAM self refresh state */
-       driver->states[1].enter = at91_enter_idle;
-       driver->states[1].exit_latency = 10;
-       driver->states[1].target_residency = 10000;
-       driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-       strcpy(driver->states[1].name, "RAM_SR");
-       strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
  
        cpuidle_register_driver(&at91_idle_driver);
  
diff --combined arch/arm/mach-shmobile/cpuidle.c
index 21b09b6455e4b7788057df17d624f3778a7cad21,ca23b202b02d9df22e7bf241b11aab11f8d413ec..7e6559105d40f27be8ffbdbc394cc377cec65ade
@@@ -13,6 -13,8 +13,7 @@@
  #include <linux/suspend.h>
  #include <linux/module.h>
  #include <linux/err.h>
 -#include <asm/system.h>
+ #include <asm/cpuidle.h>
  #include <asm/io.h>
  
  static void shmobile_enter_wfi(void)
@@@ -28,37 -30,19 +29,19 @@@ static int shmobile_cpuidle_enter(struc
                                  struct cpuidle_driver *drv,
                                  int index)
  {
-       ktime_t before, after;
-       before = ktime_get();
-       local_irq_disable();
-       local_fiq_disable();
        shmobile_cpuidle_modes[index]();
  
-       local_irq_enable();
-       local_fiq_enable();
-       after = ktime_get();
-       dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
        return index;
  }
  
  static struct cpuidle_device shmobile_cpuidle_dev;
  static struct cpuidle_driver shmobile_cpuidle_driver = {
-       .name =         "shmobile_cpuidle",
-       .owner =        THIS_MODULE,
-       .states[0] = {
-               .name = "C1",
-               .desc = "WFI",
-               .exit_latency = 1,
-               .target_residency = 1 * 2,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
-       },
-       .safe_state_index = 0, /* C1 */
-       .state_count = 1,
+       .name                   = "shmobile_cpuidle",
+       .owner                  = THIS_MODULE,
+       .en_core_tk_irqen       = 1,
+       .states[0]              = ARM_CPUIDLE_WFI_STATE,
+       .safe_state_index       = 0, /* C1 */
+       .state_count            = 1,
  };
  
  void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
diff --combined arch/x86/kernel/acpi/boot.c
index 0f42c2f44311a9332ec538e32d09cd34cb67c396,bbcc2c389adea731ce25b62db9530e9fb2c24890..a415b1f443659132ba216c88309f67d9e5d04a73
@@@ -239,7 -239,7 +239,7 @@@ acpi_parse_x2apic(struct acpi_subtable_
         * to not preallocating memory for all NR_CPUS
         * when we use CPU hotplug.
         */
 -      if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled)
 +      if (!apic->apic_id_valid(apic_id) && enabled)
                printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
        else
                acpi_register_lapic(apic_id, enabled);
@@@ -593,7 -593,7 +593,7 @@@ void __init acpi_set_irq_model_ioapic(v
  #ifdef CONFIG_ACPI_HOTPLUG_CPU
  #include <acpi/processor.h>
  
 -static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 +static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
  {
  #ifdef CONFIG_ACPI_NUMA
        int nid;
@@@ -642,6 -642,7 +642,7 @@@ static int __cpuinit _acpi_map_lsapic(a
        kfree(buffer.pointer);
        buffer.length = ACPI_ALLOCATE_BUFFER;
        buffer.pointer = NULL;
+       lapic = NULL;
  
        if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
                goto out;
                goto free_tmp_map;
  
        cpumask_copy(tmp_map, cpu_present_mask);
-       acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
+       acpi_register_lapic(physid, ACPI_MADT_ENABLED);
  
        /*
         * If mp_register_lapic successfully generates a new logical cpu
diff --combined arch/x86/kernel/smpboot.c
index ce13315d48fb73d9d55f8344e99e6c50bbe168db,93a2a0932b518a417c7467774560f7f686d4934e..6e1e406038c23d963ee12fcab76abca544cc1d86
@@@ -50,6 -50,7 +50,7 @@@
  #include <linux/tboot.h>
  #include <linux/stackprotector.h>
  #include <linux/gfp.h>
+ #include <linux/cpuidle.h>
  
  #include <asm/acpi.h>
  #include <asm/desc.h>
@@@ -219,9 -220,14 +220,9 @@@ static void __cpuinit smp_callin(void
         * Update loops_per_jiffy in cpu_data. Previous call to
         * smp_store_cpu_info() stored a value that is close but not as
         * accurate as the value just calculated.
 -       *
 -       * Need to enable IRQs because it can take longer and then
 -       * the NMI watchdog might kill us.
         */
 -      local_irq_enable();
        calibrate_delay();
        cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
 -      local_irq_disable();
        pr_debug("Stack at about %p\n", &cpuid);
  
        /*
@@@ -250,7 -256,6 +251,7 @@@ notrace static void __cpuinit start_sec
         * most necessary things.
         */
        cpu_init();
 +      x86_cpuinit.early_percpu_clock_init();
        preempt_disable();
        smp_callin();
  
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();
  
 -      /*
 -       * Wait until the cpu which brought this one up marked it
 -       * online before enabling interrupts. If we don't do that then
 -       * we can end up waking up the softirq thread before this cpu
 -       * reached the active state, which makes the scheduler unhappy
 -       * and schedule the softirq thread on the wrong cpu. This is
 -       * only observable with forced threaded interrupts, but in
 -       * theory it could also happen w/o them. It's just way harder
 -       * to achieve.
 -       */
 -      while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
 -              cpu_relax();
 -
        /* enable local interrupts */
        local_irq_enable();
  
@@@ -723,6 -741,8 +724,6 @@@ do_rest
         * the targeted processor.
         */
  
 -      printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip);
 -
        atomic_set(&init_deasserted, 0);
  
        if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
                        schedule();
                }
  
 -              if (cpumask_test_cpu(cpu, cpu_callin_mask))
 +              if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 +                      print_cpu_msr(&cpu_data(cpu));
                        pr_debug("CPU%d: has booted.\n", cpu);
 -              else {
 +              } else {
                        boot_error = 1;
                        if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
                            == 0xA5A5A5A5)
@@@ -829,7 -848,7 +830,7 @@@ int __cpuinit native_cpu_up(unsigned in
  
        if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
            !physid_isset(apicid, phys_cpu_present_map) ||
 -          (!x2apic_mode && apicid >= 255)) {
 +          !apic->apic_id_valid(apicid)) {
                printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
                return -EINVAL;
        }
@@@ -1404,7 -1423,8 +1405,8 @@@ void native_play_dead(void
        tboot_shutdown(TB_SHUTDOWN_WFS);
  
        mwait_play_dead();      /* Only returns on failure */
-       hlt_play_dead();
+       if (cpuidle_play_dead())
+               hlt_play_dead();
  }
  
  #else /* ... !CONFIG_HOTPLUG_CPU */
diff --combined drivers/acpi/ec.c
index e37615f310d731a2f3837304b3d7f18a039bb5e4,3268dcfbaa9b3d92b7fd36871deb71e202d56373..7edaccce66402b75b1b32228bfc7cebc000066c4
@@@ -445,16 -445,6 +445,16 @@@ int ec_transaction(u8 command
  
  EXPORT_SYMBOL(ec_transaction);
  
 +/* Get the handle to the EC device */
 +acpi_handle ec_get_handle(void)
 +{
 +      if (!first_ec)
 +              return NULL;
 +      return first_ec->handle;
 +}
 +
 +EXPORT_SYMBOL(ec_get_handle);
 +
  void acpi_ec_block_transactions(void)
  {
        struct acpi_ec *ec = first_ec;
@@@ -822,10 -812,10 +822,10 @@@ static int acpi_ec_add(struct acpi_devi
                first_ec = ec;
        device->driver_data = ec;
  
-       WARN(!request_region(ec->data_addr, 1, "EC data"),
-            "Could not request EC data io port 0x%lx", ec->data_addr);
-       WARN(!request_region(ec->command_addr, 1, "EC cmd"),
-            "Could not request EC cmd io port 0x%lx", ec->command_addr);
+       ret = !!request_region(ec->data_addr, 1, "EC data");
+       WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
+       ret = !!request_region(ec->command_addr, 1, "EC cmd");
+       WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
  
        pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
                          ec->gpe, ec->command_addr, ec->data_addr);
diff --combined drivers/acpi/processor_driver.c
index d4d9cb7e016a0b2d5ea5663f039ae905f77ce92c,41de1355edcdacdd94242f8b143dc2b97ff898ee..0734086537b89732ba805158389b904e94ae7ecf
@@@ -46,6 -46,7 +46,6 @@@
  #include <linux/slab.h>
  
  #include <asm/io.h>
 -#include <asm/system.h>
  #include <asm/cpu.h>
  #include <asm/delay.h>
  #include <asm/uaccess.h>
@@@ -67,6 -68,7 +67,7 @@@
  #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
  #define ACPI_PROCESSOR_NOTIFY_POWER   0x81
  #define ACPI_PROCESSOR_NOTIFY_THROTTLING      0x82
+ #define ACPI_PROCESSOR_DEVICE_HID     "ACPI0007"
  
  #define ACPI_PROCESSOR_LIMIT_USER     0
  #define ACPI_PROCESSOR_LIMIT_THERMAL  1
@@@ -87,7 -89,7 +88,7 @@@ static int acpi_processor_start(struct 
  
  static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, 0},
-       {"ACPI0007", 0},
+       {ACPI_PROCESSOR_DEVICE_HID, 0},
        {"", 0},
  };
  MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@@ -473,7 -475,6 +474,7 @@@ static __ref int acpi_processor_start(s
  
  #ifdef CONFIG_CPU_FREQ
        acpi_processor_ppc_has_changed(pr, 0);
 +      acpi_processor_load_module(pr);
  #endif
        acpi_processor_get_throttling_info(pr);
        acpi_processor_get_limit_info(pr);
@@@ -535,8 -536,8 +536,8 @@@ static int __cpuinit acpi_processor_add
                return -ENOMEM;
  
        if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
-               kfree(pr);
-               return -ENOMEM;
+               result = -ENOMEM;
+               goto err_free_pr;
        }
  
        pr->handle = device->handle;
        dev = get_cpu_device(pr->id);
        if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
                result = -EFAULT;
-               goto err_free_cpumask;
+               goto err_clear_processor;
        }
  
        /*
  
  err_remove_sysfs:
        sysfs_remove_link(&device->dev.kobj, "sysdev");
+ err_clear_processor:
+       /*
+        * processor_device_array is not cleared to allow checks for buggy BIOS
+        */ 
+       per_cpu(processors, pr->id) = NULL;
  err_free_cpumask:
        free_cpumask_var(pr->throttling.shared_cpu_map);
+ err_free_pr:
+       kfree(pr);
        return result;
  }
  
@@@ -741,20 -748,46 +748,46 @@@ static void acpi_processor_hotplug_noti
        return;
  }
  
+ static acpi_status is_processor_device(acpi_handle handle)
+ {
+       struct acpi_device_info *info;
+       char *hid;
+       acpi_status status;
+       status = acpi_get_object_info(handle, &info);
+       if (ACPI_FAILURE(status))
+               return status;
+       if (info->type == ACPI_TYPE_PROCESSOR) {
+               kfree(info);
+               return AE_OK;   /* found a processor object */
+       }
+       if (!(info->valid & ACPI_VALID_HID)) {
+               kfree(info);
+               return AE_ERROR;
+       }
+       hid = info->hardware_id.string;
+       if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
+               kfree(info);
+               return AE_ERROR;
+       }
+       kfree(info);
+       return AE_OK;   /* found a processor device object */
+ }
  static acpi_status
  processor_walk_namespace_cb(acpi_handle handle,
                            u32 lvl, void *context, void **rv)
  {
        acpi_status status;
        int *action = context;
-       acpi_object_type type = 0;
  
-       status = acpi_get_type(handle, &type);
+       status = is_processor_device(handle);
        if (ACPI_FAILURE(status))
-               return (AE_OK);
-       if (type != ACPI_TYPE_PROCESSOR)
-               return (AE_OK);
+               return AE_OK;   /* not a processor; continue to walk */
  
        switch (*action) {
        case INSTALL_NOTIFY_HANDLER:
                break;
        }
  
-       return (AE_OK);
+       /* found a processor; skip walking underneath */
+       return AE_CTRL_DEPTH;
  }
  
  static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
@@@ -830,7 -864,7 +864,7 @@@ void acpi_processor_install_hotplug_not
  {
  #ifdef CONFIG_ACPI_HOTPLUG_CPU
        int action = INSTALL_NOTIFY_HANDLER;
-       acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+       acpi_walk_namespace(ACPI_TYPE_ANY,
                            ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            processor_walk_namespace_cb, NULL, &action, NULL);
@@@ -843,7 -877,7 +877,7 @@@ void acpi_processor_uninstall_hotplug_n
  {
  #ifdef CONFIG_ACPI_HOTPLUG_CPU
        int action = UNINSTALL_NOTIFY_HANDLER;
-       acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
+       acpi_walk_namespace(ACPI_TYPE_ANY,
                            ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            processor_walk_namespace_cb, NULL, &action, NULL);
diff --combined drivers/cpuidle/cpuidle.c
index 6588f43017bd2803d29e04cd28961d858430828d,3e146b2ada4a1c4682ecaa5aca1394f15c2c3238..87411cebc57725223a994eb6467ad0aa209d3515
@@@ -53,6 -53,52 +53,52 @@@ static void cpuidle_kick_cpus(void) {
  
  static int __cpuidle_register_device(struct cpuidle_device *dev);
  
+ static inline int cpuidle_enter(struct cpuidle_device *dev,
+                               struct cpuidle_driver *drv, int index)
+ {
+       struct cpuidle_state *target_state = &drv->states[index];
+       return target_state->enter(dev, drv, index);
+ }
+ static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
+                              struct cpuidle_driver *drv, int index)
+ {
+       return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
+ }
+ typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
+                              struct cpuidle_driver *drv, int index);
+ static cpuidle_enter_t cpuidle_enter_ops;
+ /**
+  * cpuidle_play_dead - cpu off-lining
+  *
+  * Only returns in case of an error
+  */
+ int cpuidle_play_dead(void)
+ {
+       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+       struct cpuidle_driver *drv = cpuidle_get_driver();
+       int i, dead_state = -1;
+       int power_usage = -1;
+       /* Find lowest-power state that supports long-term idle */
+       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+               struct cpuidle_state *s = &drv->states[i];
+               if (s->power_usage < power_usage && s->enter_dead) {
+                       power_usage = s->power_usage;
+                       dead_state = i;
+               }
+       }
+       if (dead_state != -1)
+               return drv->states[dead_state].enter_dead(dev, dead_state);
+       return -ENODEV;
+ }
  /**
   * cpuidle_idle_call - the main idle loop
   *
@@@ -63,7 -109,6 +109,6 @@@ int cpuidle_idle_call(void
  {
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
-       struct cpuidle_state *target_state;
        int next_state, entered_state;
  
        if (off)
                return 0;
        }
  
-       target_state = &drv->states[next_state];
 -      trace_power_start(POWER_CSTATE, next_state, dev->cpu);
 -      trace_cpu_idle(next_state, dev->cpu);
 +      trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
 +      trace_cpu_idle_rcuidle(next_state, dev->cpu);
  
-       entered_state = target_state->enter(dev, drv, next_state);
+       entered_state = cpuidle_enter_ops(dev, drv, next_state);
  
 -      trace_power_end(dev->cpu);
 -      trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 +      trace_power_end_rcuidle(dev->cpu);
 +      trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
  
        if (entered_state >= 0) {
                /* Update cpuidle counters */
                dev->states_usage[entered_state].time +=
                                (unsigned long long)dev->last_residency;
                dev->states_usage[entered_state].usage++;
+       } else {
+               dev->last_residency = 0;
        }
  
        /* give the governor an opportunity to reflect on the outcome */
@@@ -164,6 -209,37 +209,37 @@@ void cpuidle_resume_and_unlock(void
  
  EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
  
+ /**
+  * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
+  * @dev: pointer to a valid cpuidle_device object
+  * @drv: pointer to a valid cpuidle_driver object
+  * @index: index of the target cpuidle state.
+  */
+ int cpuidle_wrap_enter(struct cpuidle_device *dev,
+                               struct cpuidle_driver *drv, int index,
+                               int (*enter)(struct cpuidle_device *dev,
+                                       struct cpuidle_driver *drv, int index))
+ {
+       ktime_t time_start, time_end;
+       s64 diff;
+       time_start = ktime_get();
+       index = enter(dev, drv, index);
+       time_end = ktime_get();
+       local_irq_enable();
+       diff = ktime_to_us(ktime_sub(time_end, time_start));
+       if (diff > INT_MAX)
+               diff = INT_MAX;
+       dev->last_residency = (int) diff;
+       return index;
+ }
  #ifdef CONFIG_ARCH_HAS_CPU_RELAX
  static int poll_idle(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
@@@ -197,6 -273,7 +273,7 @@@ static void poll_idle_init(struct cpuid
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
+       state->disable = 0;
  }
  #else
  static void poll_idle_init(struct cpuidle_driver *drv) {}
  int cpuidle_enable_device(struct cpuidle_device *dev)
  {
        int ret, i;
+       struct cpuidle_driver *drv = cpuidle_get_driver();
  
        if (dev->enabled)
                return 0;
-       if (!cpuidle_get_driver() || !cpuidle_curr_governor)
+       if (!drv || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
-               return -EINVAL;
+               dev->state_count = drv->state_count;
  
        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                        return ret;
        }
  
-       poll_idle_init(cpuidle_get_driver());
+       cpuidle_enter_ops = drv->en_core_tk_irqen ?
+               cpuidle_enter_tk : cpuidle_enter;
+       poll_idle_init(drv);
  
        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;
  
        if (cpuidle_curr_governor->enable &&
-           (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
+           (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;
  
        for (i = 0; i < dev->state_count; i++) {
diff --combined include/linux/acpi.h
index f53fea61f40a4edc0456f11ae27586ebf1a3eb6f,104eda758e7f27dacb9446a33c7f26dde31c2493..f421dd84f29d375468c3317db9cc4a69e0f9e33d
@@@ -151,7 -151,6 +151,7 @@@ extern int ec_write(u8 addr, u8 val)
  extern int ec_transaction(u8 command,
                            const u8 *wdata, unsigned wdata_len,
                            u8 *rdata, unsigned rdata_len);
 +extern acpi_handle ec_get_handle(void);
  
  #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
  
@@@ -372,4 -371,14 +372,14 @@@ static inline int acpi_nvs_for_each_reg
  
  #endif        /* !CONFIG_ACPI */
  
+ #ifdef CONFIG_ACPI
+ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
+                              u32 pm1a_ctrl,  u32 pm1b_ctrl));
+ acpi_status acpi_os_prepare_sleep(u8 sleep_state,
+                                 u32 pm1a_control, u32 pm1b_control);
+ #else
+ #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
+ #endif
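
As an aside on the include/linux/acpi.h hunk above: a hedged sketch of how a
platform layer might use the newly declared acpi_os_set_prepare_sleep() hook.
The callback prototype comes from the declaration above; the example_* names,
the initcall level, and the return-value convention noted in the comment are
assumptions for illustration, not code from this merge.

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/acpi.h>

    /* Presumably invoked by the ACPI core shortly before it writes the PM1x
     * control registers to enter the requested sleep state (judging by the
     * parameters passed in the declaration above). */
    static int example_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
    {
            pr_info("about to enter ACPI sleep state S%u\n", sleep_state);
            return 0;       /* assumption: 0 lets the normal sleep path continue */
    }

    static int __init example_sleep_hook_init(void)
    {
            acpi_os_set_prepare_sleep(example_prepare_sleep);
            return 0;
    }
    arch_initcall(example_sleep_hook_init);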
  #endif        /*_LINUX_ACPI_H*/