Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Oct 2012 01:32:35 +0000 (18:32 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Oct 2012 01:32:35 +0000 (18:32 -0700)
Pull power management updates from Rafael J. Wysocki:

 - Improved system suspend/resume and runtime PM handling for the SH
   TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).

 - Generic PM domains framework extensions related to cpuidle support
   and domain objects lookup using names.

 - ARM/shmobile power management updates including improved support for
   the SH7372's A4S power domain containing the CPU core.

 - cpufreq changes related to AMD CPUs support from Matthew Garrett,
   Andre Przywara and Borislav Petkov.

 - cpu0 cpufreq driver from Shawn Guo.

 - cpufreq governor fixes related to the relaxing of limits from Michal
   Pecio.

 - OMAP cpufreq updates from Axel Lin and Richard Zhao.

 - cpuidle ladder governor fixes related to the disabling of states from
   Carsten Emde and me.

 - Runtime PM core updates related to the interactions with the system
   suspend core from Alan Stern and Kevin Hilman.

 - Wakeup sources modification allowing more helper functions to be
   called from interrupt context from John Stultz and additional
   diagnostic code from Todd Poynor.

 - System suspend error code path fix from Feng Hong.

Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support for
hardware P-state chips.  The changes were independent but somewhat
intertwined.

* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
  PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
  cpuidle: rename function name "__cpuidle_register_driver", v2
  cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
  cpuidle: remove some empty lines
  PM: Prevent runtime suspend during system resume
  PM QoS: Use spinlock in the per-device PM QoS constraints code
  PM / Sleep: use resume event when call dpm_resume_early
  cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
  ACPI / processor: remove pointless variable initialization
  ACPI / processor: remove unused function parameter
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed
  properly __init-annotate pm_sysrq_init()
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  ...

17 files changed:
1  2 
arch/arm/kernel/smp.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/include/mach/common.h
arch/arm/mach-shmobile/include/mach/r8a7779.h
arch/arm/mach-shmobile/pm-rmobile.c
arch/arm/mach-shmobile/pm-sh7372.c
drivers/base/platform.c
drivers/base/power/main.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/powernow-k8.c
drivers/pci/pci-driver.c
include/linux/device.h
include/linux/pm.h
kernel/time/timekeeping.c

diff --combined arch/arm/kernel/smp.c
index aa4ffe6e5ecfbd9a8feeedf1c002ca50861ae541,8e03567c958354b030da70a27a9db1d01e335757..dea7a925c7e249375a86c9c943302a926c865433
  #include <linux/mm.h>
  #include <linux/err.h>
  #include <linux/cpu.h>
 -#include <linux/smp.h>
  #include <linux/seq_file.h>
  #include <linux/irq.h>
  #include <linux/percpu.h>
  #include <linux/clockchips.h>
  #include <linux/completion.h>
+ #include <linux/cpufreq.h>
  
  #include <linux/atomic.h>
 +#include <asm/smp.h>
  #include <asm/cacheflush.h>
  #include <asm/cpu.h>
  #include <asm/cputype.h>
@@@ -42,7 -43,6 +43,7 @@@
  #include <asm/ptrace.h>
  #include <asm/localtimer.h>
  #include <asm/smp_plat.h>
 +#include <asm/mach/arch.h>
  
  /*
   * as from 2.5, kernels no longer have an init_tasks structure
   */
  struct secondary_data secondary_data;
  
 +/*
 + * control for which core is the next to come out of the secondary
 + * boot "holding pen"
 + */
 +volatile int __cpuinitdata pen_release = -1;
 +
  enum ipi_msg_type {
        IPI_TIMER = 2,
        IPI_RESCHEDULE,
  
  static DECLARE_COMPLETION(cpu_running);
  
 +static struct smp_operations smp_ops;
 +
 +void __init smp_set_ops(struct smp_operations *ops)
 +{
 +      if (ops)
 +              smp_ops = *ops;
 +};
 +
  int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
  {
        int ret;
        return ret;
  }
  
 +/* platform specific SMP operations */
 +void __init smp_init_cpus(void)
 +{
 +      if (smp_ops.smp_init_cpus)
 +              smp_ops.smp_init_cpus();
 +}
 +
 +static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 +{
 +      if (smp_ops.smp_prepare_cpus)
 +              smp_ops.smp_prepare_cpus(max_cpus);
 +}
 +
 +static void __cpuinit platform_secondary_init(unsigned int cpu)
 +{
 +      if (smp_ops.smp_secondary_init)
 +              smp_ops.smp_secondary_init(cpu);
 +}
 +
 +int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 +{
 +      if (smp_ops.smp_boot_secondary)
 +              return smp_ops.smp_boot_secondary(cpu, idle);
 +      return -ENOSYS;
 +}
 +
  #ifdef CONFIG_HOTPLUG_CPU
  static void percpu_timer_stop(void);
  
 +static int platform_cpu_kill(unsigned int cpu)
 +{
 +      if (smp_ops.cpu_kill)
 +              return smp_ops.cpu_kill(cpu);
 +      return 1;
 +}
 +
 +static void platform_cpu_die(unsigned int cpu)
 +{
 +      if (smp_ops.cpu_die)
 +              smp_ops.cpu_die(cpu);
 +}
 +
 +static int platform_cpu_disable(unsigned int cpu)
 +{
 +      if (smp_ops.cpu_disable)
 +              return smp_ops.cpu_disable(cpu);
 +
 +      /*
 +       * By default, allow disabling all CPUs except the first one,
 +       * since this is special on a lot of platforms, e.g. because
 +       * of clock tick interrupts.
 +       */
 +      return cpu == 0 ? -EPERM : 0;
 +}
  /*
   * __cpu_disable runs on the processor to be shutdown.
   */
 -int __cpu_disable(void)
 +int __cpuinit __cpu_disable(void)
  {
        unsigned int cpu = smp_processor_id();
        int ret;
@@@ -215,7 -150,7 +216,7 @@@ static DECLARE_COMPLETION(cpu_died)
   * called on the thread which is asking for a CPU to be shutdown -
   * waits until shutdown has completed, or it is timed out.
   */
 -void __cpu_die(unsigned int cpu)
 +void __cpuinit __cpu_die(unsigned int cpu)
  {
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
@@@ -650,3 -585,56 +651,56 @@@ int setup_profiling_timer(unsigned int 
  {
        return -EINVAL;
  }
+ #ifdef CONFIG_CPU_FREQ
+ static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+ static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+ static unsigned long global_l_p_j_ref;
+ static unsigned long global_l_p_j_ref_freq;
+ static int cpufreq_callback(struct notifier_block *nb,
+                                       unsigned long val, void *data)
+ {
+       struct cpufreq_freqs *freq = data;
+       int cpu = freq->cpu;
+       if (freq->flags & CPUFREQ_CONST_LOOPS)
+               return NOTIFY_OK;
+       if (!per_cpu(l_p_j_ref, cpu)) {
+               per_cpu(l_p_j_ref, cpu) =
+                       per_cpu(cpu_data, cpu).loops_per_jiffy;
+               per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+               if (!global_l_p_j_ref) {
+                       global_l_p_j_ref = loops_per_jiffy;
+                       global_l_p_j_ref_freq = freq->old;
+               }
+       }
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+               loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+                                               global_l_p_j_ref_freq,
+                                               freq->new);
+               per_cpu(cpu_data, cpu).loops_per_jiffy =
+                       cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+                                       per_cpu(l_p_j_ref_freq, cpu),
+                                       freq->new);
+       }
+       return NOTIFY_OK;
+ }
+ static struct notifier_block cpufreq_notifier = {
+       .notifier_call  = cpufreq_callback,
+ };
+ static int __init register_cpufreq_notifier(void)
+ {
+       return cpufreq_register_notifier(&cpufreq_notifier,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ core_initcall(register_cpufreq_notifier);
+ #endif
index bc3b5da59e256ffd050ee6bbfb9168fcc5f6cb22,264340a60f65ca9584de004d361525d8cd303027..790dc68c431250a8ae7021bf622102826471ba16
@@@ -66,8 -66,6 +66,8 @@@
  #include <asm/mach/arch.h>
  #include <asm/setup.h>
  
 +#include "sh-gpio.h"
 +
  /*
   * Address    Interface               BusWidth        note
   * ------------------------------------------------------------------
@@@ -434,7 -432,7 +434,7 @@@ static void usb1_host_port_power(int po
                return;
  
        /* set VBOUT/PWEN and EXTLP1 in DVSTCTR */
 -      __raw_writew(__raw_readw(0xE68B0008) | 0x600, 0xE68B0008);
 +      __raw_writew(__raw_readw(IOMEM(0xE68B0008)) | 0x600, IOMEM(0xE68B0008));
  }
  
  static struct r8a66597_platdata usb1_host_data = {
@@@ -1226,11 -1224,20 +1226,20 @@@ static struct i2c_board_info i2c1_devic
  };
  
  
 -#define GPIO_PORT9CR  0xE6051009
 -#define GPIO_PORT10CR 0xE605100A
 -#define USCCR1                0xE6058144
 +#define GPIO_PORT9CR  IOMEM(0xE6051009)
 +#define GPIO_PORT10CR IOMEM(0xE605100A)
 +#define USCCR1                IOMEM(0xE6058144)
  static void __init ap4evb_init(void)
  {
+       struct pm_domain_device domain_devices[] = {
+               { "A4LC", &lcdc1_device, },
+               { "A4LC", &lcdc_device, },
+               { "A4MP", &fsi_device, },
+               { "A3SP", &sh_mmcif_device, },
+               { "A3SP", &sdhi0_device, },
+               { "A3SP", &sdhi1_device, },
+               { "A4R", &ceu_device, },
+       };
        u32 srcr4;
        struct clk *clk;
  
        gpio_request(GPIO_FN_OVCN2_1,    NULL);
  
        /* setup USB phy */
 -      __raw_writew(0x8a0a, 0xE6058130);       /* USBCR4 */
 +      __raw_writew(0x8a0a, IOMEM(0xE6058130));        /* USBCR4 */
  
        /* enable FSI2 port A (ak4643) */
        gpio_request(GPIO_FN_FSIAIBT,   NULL);
        gpio_request(GPIO_FN_HDMI_CEC, NULL);
  
        /* Reset HDMI, must be held at least one EXTALR (32768Hz) period */
 -#define SRCR4 0xe61580bc
 +#define SRCR4 IOMEM(0xe61580bc)
        srcr4 = __raw_readl(SRCR4);
        __raw_writel(srcr4 | (1 << 13), SRCR4);
        udelay(50);
  
        platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
  
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
  
        hdmi_init_pm_clock();
        fsi_init_pm_clock();
@@@ -1485,6 -1486,6 +1488,6 @@@ MACHINE_START(AP4EVB, "ap4evb"
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = ap4evb_init,
-       .init_late      = shmobile_init_late,
+       .init_late      = sh7372_pm_init_late,
        .timer          = &shmobile_timer,
  MACHINE_END
index c6593d3942738a369e12645d71ebaf60f223f3a3,da6117c326b9e0c80147050089f1d8a5f604c28a..2912eab3b967bddedb8a42e39747e9612f04d237
@@@ -37,7 -37,6 +37,7 @@@
  #include <linux/mmc/host.h>
  #include <linux/mmc/sh_mmcif.h>
  #include <linux/mmc/sh_mobile_sdhi.h>
 +#include <linux/i2c-gpio.h>
  #include <mach/common.h>
  #include <mach/irqs.h>
  #include <mach/r8a7740.h>
@@@ -55,8 -54,6 +55,8 @@@
  #include <sound/sh_fsi.h>
  #include <sound/simple_card.h>
  
 +#include "sh-gpio.h"
 +
  /*
   * CON1               Camera Module
   * CON2               Extension Bus
   *    usbhsf_power_ctrl()
   */
  #define IRQ7          evt2irq(0x02e0)
 -#define USBCR1                0xe605810a
 +#define USBCR1                IOMEM(0xe605810a)
  #define USBH          0xC6700000
  #define USBH_USBCTR   0x10834
  
@@@ -880,21 -877,6 +880,21 @@@ static struct platform_device fsi_hdmi_
        },
  };
  
 +/* RTC: RTC connects i2c-gpio. */
 +static struct i2c_gpio_platform_data i2c_gpio_data = {
 +      .sda_pin        = GPIO_PORT208,
 +      .scl_pin        = GPIO_PORT91,
 +      .udelay         = 5, /* 100 kHz */
 +};
 +
 +static struct platform_device i2c_gpio_device = {
 +      .name = "i2c-gpio",
 +      .id = 2,
 +      .dev = {
 +              .platform_data = &i2c_gpio_data,
 +      },
 +};
 +
  /* I2C */
  static struct i2c_board_info i2c0_devices[] = {
        {
        },
  };
  
 +static struct i2c_board_info i2c2_devices[] = {
 +      {
 +              I2C_BOARD_INFO("s35390a", 0x30),
 +              .type = "s35390a",
 +      },
 +};
 +
  /*
   * board devices
   */
@@@ -929,7 -904,6 +929,7 @@@ static struct platform_device *eva_devi
        &fsi_device,
        &fsi_wm8978_device,
        &fsi_hdmi_device,
 +      &i2c_gpio_device,
  };
  
  static void __init eva_clock_init(void)
@@@ -976,8 -950,8 +976,8 @@@ clock_error
  /*
   * board init
   */
 -#define GPIO_PORT7CR  0xe6050007
 -#define GPIO_PORT8CR  0xe6050008
 +#define GPIO_PORT7CR  IOMEM(0xe6050007)
 +#define GPIO_PORT8CR  IOMEM(0xe6050008)
  static void __init eva_init(void)
  {
        struct platform_device *usb = NULL;
  #endif
  
        i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
 +      i2c_register_board_info(2, i2c2_devices, ARRAY_SIZE(i2c2_devices));
  
        r8a7740_add_standard_devices();
  
  
        eva_clock_init();
  
-       rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device);
+       rmobile_add_device_to_domain("A4LC", &lcdc0_device);
+       rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device);
        if (usb)
-               rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb);
+               rmobile_add_device_to_domain("A3SP", usb);
  }
  
  static void __init eva_earlytimer_init(void)
index 62783b5d881389d405653ba9ea2550e2b6252e0a,c76776a3e70da10aaf2027537f5630b020c3f505..0c27c810cf99d0906b655f298091ffab257644fc
@@@ -64,8 -64,6 +64,8 @@@
  #include <asm/mach/arch.h>
  #include <asm/mach-types.h>
  
 +#include "sh-gpio.h"
 +
  /*
   * Address    Interface               BusWidth        note
   * ------------------------------------------------------------------
@@@ -585,8 -583,8 +585,8 @@@ out
  #define USBHS0_POLL_INTERVAL (HZ * 5)
  
  struct usbhs_private {
 -      unsigned int usbphyaddr;
 -      unsigned int usbcrcaddr;
 +      void __iomem *usbphyaddr;
 +      void __iomem *usbcrcaddr;
        struct renesas_usbhs_platform_info info;
        struct delayed_work work;
        struct platform_device *pdev;
@@@ -644,7 -642,7 +644,7 @@@ static void usbhs0_hardware_exit(struc
  }
  
  static struct usbhs_private usbhs0_private = {
 -      .usbcrcaddr     = 0xe605810c,           /* USBCR2 */
 +      .usbcrcaddr     = IOMEM(0xe605810c),            /* USBCR2 */
        .info = {
                .platform_callback = {
                        .hardware_init  = usbhs0_hardware_init,
@@@ -778,8 -776,8 +778,8 @@@ static u32 usbhs1_pipe_cfg[] = 
  };
  
  static struct usbhs_private usbhs1_private = {
 -      .usbphyaddr     = 0xe60581e2,           /* USBPHY1INTAP */
 -      .usbcrcaddr     = 0xe6058130,           /* USBCR4 */
 +      .usbphyaddr     = IOMEM(0xe60581e2),    /* USBPHY1INTAP */
 +      .usbcrcaddr     = IOMEM(0xe6058130),    /* USBCR4 */
        .info = {
                .platform_callback = {
                        .hardware_init  = usbhs1_hardware_init,
@@@ -1404,14 -1402,30 +1404,30 @@@ static struct i2c_board_info i2c1_devic
        },
  };
  
 -#define GPIO_PORT9CR  0xE6051009
 -#define GPIO_PORT10CR 0xE605100A
 -#define GPIO_PORT167CR        0xE60520A7
 -#define GPIO_PORT168CR        0xE60520A8
 -#define SRCR4         0xe61580bc
 -#define USCCR1                0xE6058144
 +#define GPIO_PORT9CR  IOMEM(0xE6051009)
 +#define GPIO_PORT10CR IOMEM(0xE605100A)
 +#define GPIO_PORT167CR        IOMEM(0xE60520A7)
 +#define GPIO_PORT168CR        IOMEM(0xE60520A8)
 +#define SRCR4         IOMEM(0xe61580bc)
 +#define USCCR1                IOMEM(0xE6058144)
  static void __init mackerel_init(void)
  {
+       struct pm_domain_device domain_devices[] = {
+               { "A4LC", &lcdc_device, },
+               { "A4LC", &hdmi_lcdc_device, },
+               { "A4LC", &meram_device, },
+               { "A4MP", &fsi_device, },
+               { "A3SP", &usbhs0_device, },
+               { "A3SP", &usbhs1_device, },
+               { "A3SP", &nand_flash_device, },
+               { "A3SP", &sh_mmcif_device, },
+               { "A3SP", &sdhi0_device, },
+ #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+               { "A3SP", &sdhi1_device, },
+ #endif
+               { "A3SP", &sdhi2_device, },
+               { "A4R", &ceu_device, },
+       };
        u32 srcr4;
        struct clk *clk;
  
  
        platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
  
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
- #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
- #endif
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
  
        hdmi_init_pm_clock();
        sh7372_pm_init();
@@@ -1653,6 -1655,6 +1657,6 @@@ MACHINE_START(MACKEREL, "mackerel"
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = mackerel_init,
-       .init_late      = shmobile_init_late,
+       .init_late      = sh7372_pm_init_late,
        .timer          = &shmobile_timer,
  MACHINE_END
index f80f9c549393e14e1c5c35673ab0707d7c641eed,eb89293fff4d36068a7fc3baf305bd7010209799..ed77ab8c91437c5bfc1cae270351b0bd06a253d6
@@@ -4,17 -4,20 +4,19 @@@
  extern void shmobile_earlytimer_init(void);
  extern struct sys_timer shmobile_timer;
  extern void shmobile_setup_delay(unsigned int max_cpu_core_mhz,
 -                               unsigned int mult, unsigned int div);
 +                       unsigned int mult, unsigned int div);
  struct twd_local_timer;
  extern void shmobile_setup_console(void);
  extern void shmobile_secondary_vector(void);
 -extern int shmobile_platform_cpu_kill(unsigned int cpu);
  struct clk;
  extern int shmobile_clk_init(void);
  extern void shmobile_handle_irq_intc(struct pt_regs *);
  extern struct platform_suspend_ops shmobile_suspend_ops;
  struct cpuidle_driver;
- extern void (*shmobile_cpuidle_modes[])(void);
- extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+ struct cpuidle_device;
+ extern int shmobile_enter_wfi(struct cpuidle_device *dev,
+                             struct cpuidle_driver *drv, int index);
+ extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
  
  extern void sh7367_init_irq(void);
  extern void sh7367_map_io(void);
@@@ -57,6 -60,11 +59,6 @@@ extern struct clk sh73a0_extal2_clk
  extern struct clk sh73a0_extcki_clk;
  extern struct clk sh73a0_extalr_clk;
  
 -extern unsigned int sh73a0_get_core_count(void);
 -extern void sh73a0_secondary_init(unsigned int cpu);
 -extern int sh73a0_boot_secondary(unsigned int cpu);
 -extern void sh73a0_smp_prepare_cpus(void);
 -
  extern void r8a7740_init_irq(void);
  extern void r8a7740_map_io(void);
  extern void r8a7740_add_early_devices(void);
@@@ -73,10 -81,13 +75,8 @@@ extern void r8a7779_pinmux_init(void)
  extern void r8a7779_pm_init(void);
  extern void r8a7740_meram_workaround(void);
  
 -extern unsigned int r8a7779_get_core_count(void);
 -extern int r8a7779_platform_cpu_kill(unsigned int cpu);
 -extern void r8a7779_secondary_init(unsigned int cpu);
 -extern int r8a7779_boot_secondary(unsigned int cpu);
 -extern void r8a7779_smp_prepare_cpus(void);
  extern void r8a7779_register_twd(void);
  
- extern void shmobile_init_late(void);
  #ifdef CONFIG_SUSPEND
  int shmobile_suspend_init(void);
  #else
@@@ -89,15 -100,10 +89,21 @@@ int shmobile_cpuidle_init(void)
  static inline int shmobile_cpuidle_init(void) { return 0; }
  #endif
  
 +extern void shmobile_cpu_die(unsigned int cpu);
 +extern int shmobile_cpu_disable(unsigned int cpu);
 +
 +#ifdef CONFIG_HOTPLUG_CPU
 +extern int shmobile_cpu_is_dead(unsigned int cpu);
 +#else
 +static inline int shmobile_cpu_is_dead(unsigned int cpu) { return 1; }
 +#endif
 +
 +extern void shmobile_smp_init_cpus(unsigned int ncores);
 +
+ static inline void shmobile_init_late(void)
+ {
+       shmobile_suspend_init();
+       shmobile_cpuidle_init();
+ }
  #endif /* __ARCH_MACH_COMMON_H */
index f504c5e81b476a8647c2659a1847e30c9d31a9bf,7ad47977d2e7ba15e1b0674b0b96c292e050dd3d..499f52d2a4a193acbcf659af657aed58d7e27ffb
@@@ -347,19 -347,9 +347,11 @@@ extern int r8a7779_sysc_power_down(stru
  extern int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch);
  
  #ifdef CONFIG_PM
- extern struct r8a7779_pm_domain r8a7779_sh4a;
- extern struct r8a7779_pm_domain r8a7779_sgx;
- extern struct r8a7779_pm_domain r8a7779_vdp1;
- extern struct r8a7779_pm_domain r8a7779_impx3;
- extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd);
- extern void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                       struct platform_device *pdev);
+ extern void __init r8a7779_init_pm_domains(void);
  #else
- #define r8a7779_init_pm_domain(pd) do { } while (0)
- #define r8a7779_add_device_to_domain(pd, pdev) do { } while (0)
+ static inline void r8a7779_init_pm_domains(void) {}
  #endif /* CONFIG_PM */
  
 +extern struct smp_operations r8a7779_smp_ops;
 +
  #endif /* __ASM_R8A7779_H__ */
index 32e177275e47dcff33f7b5a7850ce3dcc990426d,d37d368434da84399c6159295c397dc10f652158..1fc05d9453d026df07a1fb383a81f97d6f8d460e
@@@ -20,9 -20,9 +20,9 @@@
  #include <mach/pm-rmobile.h>
  
  /* SYSC */
 -#define SPDCR         0xe6180008
 -#define SWUCR         0xe6180014
 -#define PSTR          0xe6180080
 +#define SPDCR         IOMEM(0xe6180008)
 +#define SWUCR         IOMEM(0xe6180014)
 +#define PSTR          IOMEM(0xe6180080)
  
  #define PSTR_RETRIES  100
  #define PSTR_DELAY_US 10
@@@ -134,7 -134,7 +134,7 @@@ static int rmobile_pd_start_dev(struct 
        return ret;
  }
  
- void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
  {
        struct generic_pm_domain *genpd = &rmobile_pd->genpd;
        struct dev_power_governor *gov = rmobile_pd->gov;
        __rmobile_pd_power_up(rmobile_pd, false);
  }
  
- void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                struct platform_device *pdev)
+ void rmobile_init_domains(struct rmobile_pm_domain domains[], int num)
+ {
+       int j;
+       for (j = 0; j < num; j++)
+               rmobile_init_pm_domain(&domains[j]);
+ }
+ void rmobile_add_device_to_domain_td(const char *domain_name,
+                                    struct platform_device *pdev,
+                                    struct gpd_timing_data *td)
  {
        struct device *dev = &pdev->dev;
  
-       pm_genpd_add_device(&rmobile_pd->genpd, dev);
+       __pm_genpd_name_add_device(domain_name, dev, td);
        if (pm_clk_no_clocks(dev))
                pm_clk_add(dev, NULL);
  }
  
- void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                            struct rmobile_pm_domain *rmobile_sd)
+ void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                   int size)
  {
-       pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd);
+       struct gpd_timing_data latencies = {
+               .stop_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .start_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .save_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+       };
+       int j;
+       for (j = 0; j < size; j++)
+               rmobile_add_device_to_domain_td(data[j].domain_name,
+                                               data[j].pdev, &latencies);
  }
  #endif /* CONFIG_PM */
index 162121842a2b0e97cb2d032ce8d0ba6c0958041a,a7a5e20ae9a0df62e43493efff406bc8878895b9..a0826a48dd0885b921d5cc95d524848683353790
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/irq.h>
  #include <linux/bitrev.h>
  #include <linux/console.h>
+ #include <asm/cpuidle.h>
  #include <asm/io.h>
  #include <asm/tlbflush.h>
  #include <asm/suspend.h>
  #include <mach/pm-rmobile.h>
  
  /* DBG */
 -#define DBGREG1 0xe6100020
 -#define DBGREG9 0xe6100040
 +#define DBGREG1 IOMEM(0xe6100020)
 +#define DBGREG9 IOMEM(0xe6100040)
  
  /* CPGA */
 -#define SYSTBCR 0xe6150024
 -#define MSTPSR0 0xe6150030
 -#define MSTPSR1 0xe6150038
 -#define MSTPSR2 0xe6150040
 -#define MSTPSR3 0xe6150048
 -#define MSTPSR4 0xe615004c
 -#define PLLC01STPCR 0xe61500c8
 +#define SYSTBCR IOMEM(0xe6150024)
 +#define MSTPSR0 IOMEM(0xe6150030)
 +#define MSTPSR1 IOMEM(0xe6150038)
 +#define MSTPSR2 IOMEM(0xe6150040)
 +#define MSTPSR3 IOMEM(0xe6150048)
 +#define MSTPSR4 IOMEM(0xe615004c)
 +#define PLLC01STPCR IOMEM(0xe61500c8)
  
  /* SYSC */
 -#define SBAR 0xe6180020
 -#define WUPRMSK 0xe6180028
 -#define WUPSMSK 0xe618002c
 -#define WUPSMSK2 0xe6180048
 -#define WUPSFAC 0xe6180098
 -#define IRQCR 0xe618022c
 -#define IRQCR2 0xe6180238
 -#define IRQCR3 0xe6180244
 -#define IRQCR4 0xe6180248
 -#define PDNSEL 0xe6180254
 +#define SBAR IOMEM(0xe6180020)
 +#define WUPRMSK IOMEM(0xe6180028)
 +#define WUPSMSK IOMEM(0xe618002c)
 +#define WUPSMSK2 IOMEM(0xe6180048)
 +#define WUPSFAC IOMEM(0xe6180098)
 +#define IRQCR IOMEM(0xe618022c)
 +#define IRQCR2 IOMEM(0xe6180238)
 +#define IRQCR3 IOMEM(0xe6180244)
 +#define IRQCR4 IOMEM(0xe6180248)
 +#define PDNSEL IOMEM(0xe6180254)
  
  /* INTC */
 -#define ICR1A 0xe6900000
 -#define ICR2A 0xe6900004
 -#define ICR3A 0xe6900008
 -#define ICR4A 0xe690000c
 -#define INTMSK00A 0xe6900040
 -#define INTMSK10A 0xe6900044
 -#define INTMSK20A 0xe6900048
 -#define INTMSK30A 0xe690004c
 +#define ICR1A IOMEM(0xe6900000)
 +#define ICR2A IOMEM(0xe6900004)
 +#define ICR3A IOMEM(0xe6900008)
 +#define ICR4A IOMEM(0xe690000c)
 +#define INTMSK00A IOMEM(0xe6900040)
 +#define INTMSK10A IOMEM(0xe6900044)
 +#define INTMSK20A IOMEM(0xe6900048)
 +#define INTMSK30A IOMEM(0xe690004c)
  
  /* MFIS */
 +/* FIXME: pointing where? */
  #define SMFRAM 0xe6a70000
  
  /* AP-System Core */
 -#define APARMBAREA 0xe6f10020
 +#define APARMBAREA IOMEM(0xe6f10020)
  
  #ifdef CONFIG_PM
  
- struct rmobile_pm_domain sh7372_pd_a4lc = {
-       .genpd.name = "A4LC",
-       .bit_shift = 1,
- };
- struct rmobile_pm_domain sh7372_pd_a4mp = {
-       .genpd.name = "A4MP",
-       .bit_shift = 2,
- };
- struct rmobile_pm_domain sh7372_pd_d4 = {
-       .genpd.name = "D4",
-       .bit_shift = 3,
- };
+ #define PM_DOMAIN_ON_OFF_LATENCY_NS   250000
  
  static int sh7372_a4r_pd_suspend(void)
  {
        return 0;
  }
  
- struct rmobile_pm_domain sh7372_pd_a4r = {
-       .genpd.name = "A4R",
-       .bit_shift = 5,
-       .suspend = sh7372_a4r_pd_suspend,
-       .resume = sh7372_intcs_resume,
- };
+ static bool a4s_suspend_ready;
  
- struct rmobile_pm_domain sh7372_pd_a3rv = {
-       .genpd.name = "A3RV",
-       .bit_shift = 6,
- };
- struct rmobile_pm_domain sh7372_pd_a3ri = {
-       .genpd.name = "A3RI",
-       .bit_shift = 8,
- };
- static int sh7372_pd_a4s_suspend(void)
+ static int sh7372_a4s_pd_suspend(void)
  {
        /*
         * The A4S domain contains the CPU core and therefore it should
-        * only be turned off if the CPU is in use.
+        * only be turned off if the CPU is not in use.  This may happen
+        * during system suspend, when SYSC is going to be used for generating
+        * resume signals and a4s_suspend_ready is set to let
+        * sh7372_enter_suspend() know that it can turn A4S off.
         */
+       a4s_suspend_ready = true;
        return -EBUSY;
  }
  
- struct rmobile_pm_domain sh7372_pd_a4s = {
-       .genpd.name = "A4S",
-       .bit_shift = 10,
-       .gov = &pm_domain_always_on_gov,
-       .no_debug = true,
-       .suspend = sh7372_pd_a4s_suspend,
- };
+ static void sh7372_a4s_pd_resume(void)
+ {
+       a4s_suspend_ready = false;
+ }
  
  static int sh7372_a3sp_pd_suspend(void)
  {
        return console_suspend_enabled ? 0 : -EBUSY;
  }
  
- struct rmobile_pm_domain sh7372_pd_a3sp = {
-       .genpd.name = "A3SP",
-       .bit_shift = 11,
-       .gov = &pm_domain_always_on_gov,
-       .no_debug = true,
-       .suspend = sh7372_a3sp_pd_suspend,
+ static struct rmobile_pm_domain sh7372_pm_domains[] = {
+       {
+               .genpd.name = "A4LC",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 1,
+       },
+       {
+               .genpd.name = "A4MP",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 2,
+       },
+       {
+               .genpd.name = "D4",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 3,
+       },
+       {
+               .genpd.name = "A4R",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 5,
+               .suspend = sh7372_a4r_pd_suspend,
+               .resume = sh7372_intcs_resume,
+       },
+       {
+               .genpd.name = "A3RV",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 6,
+       },
+       {
+               .genpd.name = "A3RI",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 8,
+       },
+       {
+               .genpd.name = "A4S",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 10,
+               .gov = &pm_domain_always_on_gov,
+               .no_debug = true,
+               .suspend = sh7372_a4s_pd_suspend,
+               .resume = sh7372_a4s_pd_resume,
+       },
+       {
+               .genpd.name = "A3SP",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 11,
+               .gov = &pm_domain_always_on_gov,
+               .no_debug = true,
+               .suspend = sh7372_a3sp_pd_suspend,
+       },
+       {
+               .genpd.name = "A3SG",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 13,
+       },
  };
  
- struct rmobile_pm_domain sh7372_pd_a3sg = {
-       .genpd.name = "A3SG",
-       .bit_shift = 13,
- };
+ void __init sh7372_init_pm_domains(void)
+ {
+       rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains));
+       pm_genpd_add_subdomain_names("A4LC", "A3RV");
+       pm_genpd_add_subdomain_names("A4R", "A4LC");
+       pm_genpd_add_subdomain_names("A4S", "A3SG");
+       pm_genpd_add_subdomain_names("A4S", "A3SP");
+ }
  
  #endif /* CONFIG_PM */
  
@@@ -304,6 -339,21 +340,21 @@@ static void sh7372_enter_a3sm_common(in
        sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
        sh7372_enter_sysc(pllc0_on, 1 << 12);
  }
+ static void sh7372_enter_a4s_common(int pllc0_on)
+ {
+       sh7372_intca_suspend();
+       sh7372_set_reset_vector(SMFRAM);
+       sh7372_enter_sysc(pllc0_on, 1 << 10);
+       sh7372_intca_resume();
+ }
+ static void sh7372_pm_setup_smfram(void)
+ {
+       memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+ }
+ #else
+ static inline void sh7372_pm_setup_smfram(void) {}
  #endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */
  
  #ifdef CONFIG_CPU_IDLE
@@@ -313,7 -363,8 +364,8 @@@ static int sh7372_do_idle_core_standby(
        return 0;
  }
  
- static void sh7372_enter_core_standby(void)
+ static int sh7372_enter_core_standby(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
  {
        sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
  
  
         /* disable reset vector translation */
        __raw_writel(0, SBAR);
+       return 1;
  }
  
- static void sh7372_enter_a3sm_pll_on(void)
+ static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev,
+                                   struct cpuidle_driver *drv, int index)
  {
        sh7372_enter_a3sm_common(1);
+       return 2;
  }
  
- static void sh7372_enter_a3sm_pll_off(void)
+ static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
  {
        sh7372_enter_a3sm_common(0);
+       return 3;
  }
  
- static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
+ static int sh7372_enter_a4s(struct cpuidle_device *dev,
+                           struct cpuidle_driver *drv, int index)
  {
-       struct cpuidle_state *state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-       strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
-       state->exit_latency = 10;
-       state->target_residency = 20 + 10;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
-       drv->state_count++;
-       state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-       strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
-       state->exit_latency = 20;
-       state->target_residency = 30 + 20;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
-       drv->state_count++;
-       state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
-       strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
-       state->exit_latency = 120;
-       state->target_residency = 30 + 120;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
-       drv->state_count++;
+       unsigned long msk, msk2;
+       if (!sh7372_sysc_valid(&msk, &msk2))
+               return sh7372_enter_a3sm_pll_off(dev, drv, index);
+       sh7372_setup_sysc(msk, msk2);
+       sh7372_enter_a4s_common(0);
+       return 4;
  }
  
+ static struct cpuidle_driver sh7372_cpuidle_driver = {
+       .name                   = "sh7372_cpuidle",
+       .owner                  = THIS_MODULE,
+       .en_core_tk_irqen       = 1,
+       .state_count            = 5,
+       .safe_state_index       = 0, /* C1 */
+       .states[0] = ARM_CPUIDLE_WFI_STATE,
+       .states[0].enter = shmobile_enter_wfi,
+       .states[1] = {
+               .name = "C2",
+               .desc = "Core Standby Mode",
+               .exit_latency = 10,
+               .target_residency = 20 + 10,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_core_standby,
+       },
+       .states[2] = {
+               .name = "C3",
+               .desc = "A3SM PLL ON",
+               .exit_latency = 20,
+               .target_residency = 30 + 20,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a3sm_pll_on,
+       },
+       .states[3] = {
+               .name = "C4",
+               .desc = "A3SM PLL OFF",
+               .exit_latency = 120,
+               .target_residency = 30 + 120,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a3sm_pll_off,
+       },
+       .states[4] = {
+               .name = "C5",
+               .desc = "A4S PLL OFF",
+               .exit_latency = 240,
+               .target_residency = 30 + 240,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a4s,
+               .disabled = true,
+       },
+ };
  static void sh7372_cpuidle_init(void)
  {
-       shmobile_cpuidle_setup = sh7372_cpuidle_setup;
+       shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
  }
  #else
  static void sh7372_cpuidle_init(void) {}
  #endif
  
  #ifdef CONFIG_SUSPEND
- static void sh7372_enter_a4s_common(int pllc0_on)
- {
-       sh7372_intca_suspend();
-       memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
-       sh7372_set_reset_vector(SMFRAM);
-       sh7372_enter_sysc(pllc0_on, 1 << 10);
-       sh7372_intca_resume();
- }
  static int sh7372_enter_suspend(suspend_state_t suspend_state)
  {
        unsigned long msk, msk2;
  
        /* check active clocks to determine potential wakeup sources */
-       if (sh7372_sysc_valid(&msk, &msk2)) {
-               if (!console_suspend_enabled &&
-                   sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) {
-                       /* convert INTC mask/sense to SYSC mask/sense */
-                       sh7372_setup_sysc(msk, msk2);
-                       /* enter A4S sleep with PLLC0 off */
-                       pr_debug("entering A4S\n");
-                       sh7372_enter_a4s_common(0);
-                       return 0;
-               }
+       if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) {
+               /* convert INTC mask/sense to SYSC mask/sense */
+               sh7372_setup_sysc(msk, msk2);
+               /* enter A4S sleep with PLLC0 off */
+               pr_debug("entering A4S\n");
+               sh7372_enter_a4s_common(0);
+               return 0;
        }
  
        /* default to enter A3SM sleep with PLLC0 off */
@@@ -426,7 -496,7 +497,7 @@@ static int sh7372_pm_notifier_fn(struc
                 * executed during system suspend and resume, respectively, so
                 * that those functions don't crash while accessing the INTCS.
                 */
-               pm_genpd_poweron(&sh7372_pd_a4r.genpd);
+               pm_genpd_name_poweron("A4R");
                break;
        case PM_POST_SUSPEND:
                pm_genpd_poweroff_unused();
@@@ -455,6 -525,14 +526,14 @@@ void __init sh7372_pm_init(void
        /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
        __raw_writel(0, PDNSEL);
  
+       sh7372_pm_setup_smfram();
        sh7372_suspend_init();
        sh7372_cpuidle_init();
  }
+ void __init sh7372_pm_init_late(void)
+ {
+       shmobile_init_late();
+       pm_genpd_name_attach_cpuidle("A4S", 4);
+ }
diff --combined drivers/base/platform.c
index ddeca142293ccbe378f5b5c207465f4e1f5e3056,d51514b79efedc4ad446e5ce6dd87a8ee9d2fb6d..8727e9c5eea47dd78170e091635f31d29fc7cb52
  #include <linux/err.h>
  #include <linux/slab.h>
  #include <linux/pm_runtime.h>
 +#include <linux/idr.h>
  
  #include "base.h"
+ #include "power/power.h"
  
 +/* For automatically allocated device IDs */
 +static DEFINE_IDA(platform_devid_ida);
 +
  #define to_platform_driver(drv)       (container_of((drv), struct platform_driver, \
                                 driver))
  
@@@ -103,9 -100,6 +104,9 @@@ struct resource *platform_get_resource_
        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = &dev->resource[i];
  
 +              if (unlikely(!r->name))
 +                      continue;
 +
                if (type == resource_type(r) && !strcmp(r->name, name))
                        return r;
        }
@@@ -270,7 -264,7 +271,7 @@@ EXPORT_SYMBOL_GPL(platform_device_add_d
   */
  int platform_device_add(struct platform_device *pdev)
  {
 -      int i, ret = 0;
 +      int i, ret;
  
        if (!pdev)
                return -EINVAL;
  
        pdev->dev.bus = &platform_bus_type;
  
 -      if (pdev->id != -1)
 +      switch (pdev->id) {
 +      default:
                dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
 -      else
 +              break;
 +      case PLATFORM_DEVID_NONE:
                dev_set_name(&pdev->dev, "%s", pdev->name);
 +              break;
 +      case PLATFORM_DEVID_AUTO:
 +              /*
 +               * Automatically allocated device ID. We mark it as such so
 +               * that we remember it must be freed, and we append a suffix
 +               * to avoid namespace collision with explicit IDs.
 +               */
 +              ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
 +              if (ret < 0)
 +                      goto err_out;
 +              pdev->id = ret;
 +              pdev->id_auto = true;
 +              dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
 +              break;
 +      }
  
        for (i = 0; i < pdev->num_resources; i++) {
                struct resource *p, *r = &pdev->resource[i];
                return ret;
  
   failed:
 +      if (pdev->id_auto) {
 +              ida_simple_remove(&platform_devid_ida, pdev->id);
 +              pdev->id = PLATFORM_DEVID_AUTO;
 +      }
 +
        while (--i >= 0) {
                struct resource *r = &pdev->resource[i];
                unsigned long type = resource_type(r);
                        release_resource(r);
        }
  
 + err_out:
        return ret;
  }
  EXPORT_SYMBOL_GPL(platform_device_add);
@@@ -366,11 -337,6 +367,11 @@@ void platform_device_del(struct platfor
        if (pdev) {
                device_del(&pdev->dev);
  
 +              if (pdev->id_auto) {
 +                      ida_simple_remove(&platform_devid_ida, pdev->id);
 +                      pdev->id = PLATFORM_DEVID_AUTO;
 +              }
 +
                for (i = 0; i < pdev->num_resources; i++) {
                        struct resource *r = &pdev->resource[i];
                        unsigned long type = resource_type(r);
@@@ -983,6 -949,7 +984,7 @@@ void __init early_platform_add_devices(
                dev = &devs[i]->dev;
  
                if (!dev->devres_head.next) {
+                       pm_runtime_early_init(dev);
                        INIT_LIST_HEAD(&dev->devres_head);
                        list_add_tail(&dev->devres_head,
                                      &early_platform_device_list);
index b0b072a88f5fdaf8447ae6fb9133a613be8fac06,008e6786ae7919e3e4165e6b605cb6a7836ad4c6..a3c1404c79338c3b7e82b7b71835971dc42ec358
@@@ -57,20 -57,17 +57,17 @@@ static pm_message_t pm_transition
  static int async_error;
  
  /**
-  * device_pm_init - Initialize the PM-related part of a device object.
+  * device_pm_sleep_init - Initialize system suspend-related device fields.
   * @dev: Device object being initialized.
   */
- void device_pm_init(struct device *dev)
+ void device_pm_sleep_init(struct device *dev)
  {
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
-       spin_lock_init(&dev->power.lock);
-       pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
-       dev->power.power_state = PMSG_INVALID;
  }
  
  /**
@@@ -408,6 -405,9 +405,9 @@@ static int device_resume_noirq(struct d
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
  
+       if (dev->power.syscore)
+               goto Out;
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
  
        error = dpm_run_callback(callback, dev, state, info);
  
+  Out:
        TRACE_RESUME(error);
        return error;
  }
@@@ -486,6 -487,9 +487,9 @@@ static int device_resume_early(struct d
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
  
+       if (dev->power.syscore)
+               goto Out;
        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
  
        error = dpm_run_callback(callback, dev, state, info);
  
+  Out:
        TRACE_RESUME(error);
        return error;
  }
@@@ -565,11 -570,13 +570,13 @@@ static int device_resume(struct device 
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
-       bool put = false;
  
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
  
+       if (dev->power.syscore)
+               goto Complete;
        dpm_wait(dev->parent, async);
        device_lock(dev);
  
                goto Unlock;
  
        pm_runtime_enable(dev);
-       put = true;
  
        if (dev->pm_domain) {
                info = "power domain ";
  
   Unlock:
        device_unlock(dev);
+  Complete:
        complete_all(&dev->power.completion);
  
        TRACE_RESUME(error);
  
-       if (put)
-               pm_runtime_put_sync(dev);
        return error;
  }
  
@@@ -722,6 -727,9 +727,9 @@@ static void device_complete(struct devi
        void (*callback)(struct device *) = NULL;
        char *info = NULL;
  
+       if (dev->power.syscore)
+               return;
        device_lock(dev);
  
        if (dev->pm_domain) {
        }
  
        device_unlock(dev);
+       pm_runtime_put_sync(dev);
  }
  
  /**
@@@ -834,6 -844,9 +844,9 @@@ static int device_suspend_noirq(struct 
        pm_callback_t callback = NULL;
        char *info = NULL;
  
+       if (dev->power.syscore)
+               return 0;
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@@ -917,6 -930,9 +930,9 @@@ static int device_suspend_late(struct d
        pm_callback_t callback = NULL;
        char *info = NULL;
  
+       if (dev->power.syscore)
+               return 0;
        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@@ -996,7 -1012,7 +1012,7 @@@ int dpm_suspend_end(pm_message_t state
  
        error = dpm_suspend_noirq(state);
        if (error) {
-               dpm_resume_early(state);
+               dpm_resume_early(resume_event(state));
                return error;
        }
  
@@@ -1043,16 -1059,23 +1059,23 @@@ static int __device_suspend(struct devi
        if (async_error)
                goto Complete;
  
-       pm_runtime_get_noresume(dev);
+       /*
+        * If a device configured to wake up the system from sleep states
+        * has been suspended at run time and there's a resume request pending
+        * for it, this is equivalent to the device signaling wakeup, so the
+        * system suspend operation should be aborted.
+        */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);
  
        if (pm_wakeup_pending()) {
-               pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                goto Complete;
        }
  
+       if (dev->power.syscore)
+               goto Complete;
        device_lock(dev);
  
        if (dev->pm_domain) {
   Complete:
        complete_all(&dev->power.completion);
  
-       if (error) {
-               pm_runtime_put_sync(dev);
+       if (error)
                async_error = error;
-       } else if (dev->power.is_suspended) {
+       else if (dev->power.is_suspended)
                __pm_runtime_disable(dev, false);
-       }
  
        return error;
  }
@@@ -1209,6 -1230,17 +1230,17 @@@ static int device_prepare(struct devic
        char *info = NULL;
        int error = 0;
  
+       if (dev->power.syscore)
+               return 0;
+       /*
+        * If a device's parent goes into runtime suspend at the wrong time,
+        * it won't be possible to resume the device.  To prevent this we
+        * block runtime suspend here, during the prepare phase, and allow
+        * it again during the complete phase.
+        */
+       pm_runtime_get_noresume(dev);
        device_lock(dev);
  
        dev->power.wakeup_path = device_may_wakeup(dev);
@@@ -1324,25 -1356,3 +1356,25 @@@ int device_pm_wait_for_dev(struct devic
        return async_error;
  }
  EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
 +
 +/**
 + * dpm_for_each_dev - device iterator.
 + * @data: data for the callback.
 + * @fn: function to be called for each device.
 + *
 + * Iterate over devices in dpm_list, and call @fn for each device,
 + * passing it @data.
 + */
 +void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
 +{
 +      struct device *dev;
 +
 +      if (!fn)
 +              return;
 +
 +      device_pm_lock();
 +      list_for_each_entry(dev, &dpm_list, power.entry)
 +              fn(dev, data);
 +      device_pm_unlock();
 +}
 +EXPORT_SYMBOL_GPL(dpm_for_each_dev);
index 55f0354864e25d5783effc0589a2f2c8e24da4c4,b75dc2c2f8d3f5e7aa8268579c7f515c1edff029..a152af7e1991eff7db4dcd97c3d0d777b8812f37
@@@ -466,7 -466,7 +466,7 @@@ static inline void dbs_timer_init(struc
        delay -= jiffies % delay;
  
        dbs_info->enable = 1;
 -      INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 +      INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
  }
  
@@@ -504,6 -504,7 +504,7 @@@ static int cpufreq_governor_dbs(struct 
                                j_dbs_info->prev_cpu_nice =
                                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
+               this_dbs_info->cpu = cpu;
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;
  
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
+               dbs_check_cpu(this_dbs_info);
                mutex_unlock(&this_dbs_info->timer_mutex);
  
                break;
index 14c1af5a264f29d33d02fef753d854c8dad0377e,9479fb33c30fd6e7eb303b9b09dd3afa543f2df0..396322f2a83ffc22fd6249a3133c588b195cf25b
@@@ -644,7 -644,7 +644,7 @@@ static inline void dbs_timer_init(struc
                delay -= jiffies % delay;
  
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 -      INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 +      INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
  }
  
@@@ -761,6 -761,7 +761,7 @@@ static int cpufreq_governor_dbs(struct 
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
+               dbs_check_cpu(this_dbs_info);
                mutex_unlock(&this_dbs_info->timer_mutex);
                break;
        }
index 1a40935c85fdcc1f21e976ad8dbd5298a87d6060,0b19faf002eeb30d44c28e56b57a8a907814989d..129e80bfff22e1d399fbe5320012f5721de1bcaf
@@@ -35,6 -35,7 +35,6 @@@
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/cpumask.h>
 -#include <linux/sched.h>      /* for current / set_cpus_allowed() */
  #include <linux/io.h>
  #include <linux/delay.h>
  
  #define PFX "powernow-k8: "
  #define VERSION "version 2.20.00"
  #include "powernow-k8.h"
- #include "mperf.h"
  
  /* serialize freq changes  */
  static DEFINE_MUTEX(fidvid_mutex);
  
  static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
  
- static int cpu_family = CPU_OPTERON;
- /* array to map SW pstate number to acpi state */
- static u32 ps_to_as[8];
- /* core performance boost */
- static bool cpb_capable, cpb_enabled;
- static struct msr __percpu *msrs;
  static struct cpufreq_driver cpufreq_amd64_driver;
  
  #ifndef CONFIG_SMP
@@@ -85,12 -76,6 +75,6 @@@ static u32 find_khz_freq_from_fid(u32 f
        return 1000 * find_freq_from_fid(fid);
  }
  
- static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
-                                    u32 pstate)
- {
-       return data[ps_to_as[pstate]].frequency;
- }
  /* Return the vco fid for an input fid
   *
   * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@@ -113,9 -98,6 +97,6 @@@ static int pending_bit_stuck(void
  {
        u32 lo, hi;
  
-       if (cpu_family == CPU_HW_PSTATE)
-               return 0;
        rdmsr(MSR_FIDVID_STATUS, lo, hi);
        return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
  }
@@@ -129,20 -111,6 +110,6 @@@ static int query_current_values_with_pe
        u32 lo, hi;
        u32 i = 0;
  
-       if (cpu_family == CPU_HW_PSTATE) {
-               rdmsr(MSR_PSTATE_STATUS, lo, hi);
-               i = lo & HW_PSTATE_MASK;
-               data->currpstate = i;
-               /*
-                * a workaround for family 11h erratum 311 might cause
-                * an "out-of-range Pstate if the core is in Pstate-0
-                */
-               if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
-                       data->currpstate = HW_PSTATE_0;
-               return 0;
-       }
        do {
                if (i++ > 10000) {
                        pr_debug("detected change pending stuck\n");
@@@ -299,14 -267,6 +266,6 @@@ static int decrease_vid_code_by_step(st
        return 0;
  }
  
- /* Change hardware pstate by single MSR write */
- static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
- {
-       wrmsr(MSR_PSTATE_CTRL, pstate, 0);
-       data->currpstate = pstate;
-       return 0;
- }
  /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
  static int transition_fid_vid(struct powernow_k8_data *data,
                u32 reqfid, u32 reqvid)
@@@ -523,8 -483,6 +482,6 @@@ static int core_voltage_post_transition
  static const struct x86_cpu_id powernow_k8_ids[] = {
        /* IO based frequency switching */
        { X86_VENDOR_AMD, 0xf },
-       /* MSR based frequency switching supported */
-       X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
        {}
  };
  MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@@ -560,15 -518,8 +517,8 @@@ static void check_supported_cpu(void *_
                                "Power state transitions not supported\n");
                        return;
                }
-       } else { /* must be a HW Pstate capable processor */
-               cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-               if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
-                       cpu_family = CPU_HW_PSTATE;
-               else
-                       return;
+               *rc = 0;
        }
-       *rc = 0;
  }
  
  static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@@ -632,18 -583,11 +582,11 @@@ static void print_basics(struct powerno
        for (j = 0; j < data->numps; j++) {
                if (data->powernow_table[j].frequency !=
                                CPUFREQ_ENTRY_INVALID) {
-                       if (cpu_family == CPU_HW_PSTATE) {
-                               printk(KERN_INFO PFX
-                                       "   %d : pstate %d (%d MHz)\n", j,
-                                       data->powernow_table[j].index,
-                                       data->powernow_table[j].frequency/1000);
-                       } else {
                                printk(KERN_INFO PFX
                                        "fid 0x%x (%d MHz), vid 0x%x\n",
                                        data->powernow_table[j].index & 0xff,
                                        data->powernow_table[j].frequency/1000,
                                        data->powernow_table[j].index >> 8);
-                       }
                }
        }
        if (data->batps)
                                data->batps);
  }
  
- static u32 freq_from_fid_did(u32 fid, u32 did)
- {
-       u32 mhz = 0;
-       if (boot_cpu_data.x86 == 0x10)
-               mhz = (100 * (fid + 0x10)) >> did;
-       else if (boot_cpu_data.x86 == 0x11)
-               mhz = (100 * (fid + 8)) >> did;
-       else
-               BUG();
-       return mhz * 1000;
- }
  static int fill_powernow_table(struct powernow_k8_data *data,
                struct pst_s *pst, u8 maxvid)
  {
@@@ -824,7 -754,7 +753,7 @@@ static void powernow_k8_acpi_pst_values
  {
        u64 control;
  
-       if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+       if (!data->acpi_data.state_count)
                return;
  
        control = data->acpi_data.states[index].control;
@@@ -875,10 -805,7 +804,7 @@@ static int powernow_k8_cpu_init_acpi(st
        data->numps = data->acpi_data.state_count;
        powernow_k8_acpi_pst_values(data, 0);
  
-       if (cpu_family == CPU_HW_PSTATE)
-               ret_val = fill_powernow_table_pstate(data, powernow_table);
-       else
-               ret_val = fill_powernow_table_fidvid(data, powernow_table);
+       ret_val = fill_powernow_table_fidvid(data, powernow_table);
        if (ret_val)
                goto err_out_mem;
  
@@@ -915,51 -842,6 +841,6 @@@ err_out
        return ret_val;
  }
  
- static int fill_powernow_table_pstate(struct powernow_k8_data *data,
-               struct cpufreq_frequency_table *powernow_table)
- {
-       int i;
-       u32 hi = 0, lo = 0;
-       rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
-       data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
-       for (i = 0; i < data->acpi_data.state_count; i++) {
-               u32 index;
-               index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-               if (index > data->max_hw_pstate) {
-                       printk(KERN_ERR PFX "invalid pstate %d - "
-                                       "bad value %d.\n", i, index);
-                       printk(KERN_ERR PFX "Please report to BIOS "
-                                       "manufacturer\n");
-                       invalidate_entry(powernow_table, i);
-                       continue;
-               }
-               ps_to_as[index] = i;
-               /* Frequency may be rounded for these */
-               if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-                                || boot_cpu_data.x86 == 0x11) {
-                       rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-                       if (!(hi & HW_PSTATE_VALID_MASK)) {
-                               pr_debug("invalid pstate %d, ignoring\n", index);
-                               invalidate_entry(powernow_table, i);
-                               continue;
-                       }
-                       powernow_table[i].frequency =
-                               freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
-               } else
-                       powernow_table[i].frequency =
-                               data->acpi_data.states[i].core_frequency * 1000;
-               powernow_table[i].index = index;
-       }
-       return 0;
- }
  static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
                struct cpufreq_frequency_table *powernow_table)
  {
@@@ -1036,15 -918,7 +917,7 @@@ static int get_transition_latency(struc
                        max_latency = cur_latency;
        }
        if (max_latency == 0) {
-               /*
-                * Fam 11h and later may return 0 as transition latency. This
-                * is intended and means "very fast". While cpufreq core and
-                * governors currently can handle that gracefully, better set it
-                * to 1 to avoid problems in the future.
-                */
-               if (boot_cpu_data.x86 < 0x11)
-                       printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-                               "latency\n");
+               pr_err(FW_WARN PFX "Invalid zero transition latency\n");
                max_latency = 1;
        }
        /* value in usecs, needs to be in nanoseconds */
@@@ -1104,57 -978,16 +977,23 @@@ static int transition_frequency_fidvid(
        return res;
  }
  
- /* Take a frequency, and issue the hardware pstate transition command */
- static int transition_frequency_pstate(struct powernow_k8_data *data,
-               unsigned int index)
- {
-       u32 pstate = 0;
-       int res, i;
-       struct cpufreq_freqs freqs;
-       pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-       /* get MSR index for hardware pstate transition */
-       pstate = index & HW_PSTATE_MASK;
-       if (pstate > data->max_hw_pstate)
-               return -EINVAL;
-       freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-                       data->currpstate);
-       freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-       for_each_cpu(i, data->available_cores) {
-               freqs.cpu = i;
-               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-       }
-       res = transition_pstate(data, pstate);
-       freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-       for_each_cpu(i, data->available_cores) {
-               freqs.cpu = i;
-               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-       }
-       return res;
- }
 -/* Driver entry point to switch to the target frequency */
 -static int powernowk8_target(struct cpufreq_policy *pol,
 -              unsigned targfreq, unsigned relation)
 +struct powernowk8_target_arg {
 +      struct cpufreq_policy           *pol;
 +      unsigned                        targfreq;
 +      unsigned                        relation;
 +};
 +
 +static long powernowk8_target_fn(void *arg)
  {
 -      cpumask_var_t oldmask;
 +      struct powernowk8_target_arg *pta = arg;
 +      struct cpufreq_policy *pol = pta->pol;
 +      unsigned targfreq = pta->targfreq;
 +      unsigned relation = pta->relation;
        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
        u32 checkfid;
        u32 checkvid;
        unsigned int newstate;
 -      int ret = -EIO;
 +      int ret;
  
        if (!data)
                return -EINVAL;
        checkfid = data->currfid;
        checkvid = data->currvid;
  
 -      /* only run on specific CPU from here on. */
 -      /* This is poor form: use a workqueue or smp_call_function_single */
 -      if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
 -              return -ENOMEM;
 -
 -      cpumask_copy(oldmask, tsk_cpus_allowed(current));
 -      set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
 -
 -      if (smp_processor_id() != pol->cpu) {
 -              printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
 -              goto err_out;
 -      }
 -
        if (pending_bit_stuck()) {
                printk(KERN_ERR PFX "failing targ, change pending bit set\n");
 -              goto err_out;
 +              return -EIO;
        }
  
        pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
                pol->cpu, targfreq, pol->min, pol->max, relation);
  
        if (query_current_values_with_pending_wait(data))
 -              goto err_out;
 +              return -EIO;
  
-       if (cpu_family != CPU_HW_PSTATE) {
-               pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-               data->currfid, data->currvid);
+       pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
  
-               if ((checkvid != data->currvid) ||
-                   (checkfid != data->currfid)) {
-                       printk(KERN_INFO PFX
-                               "error - out of sync, fix 0x%x 0x%x, "
-                               "vid 0x%x 0x%x\n",
-                               checkfid, data->currfid,
-                               checkvid, data->currvid);
-               }
+       if ((checkvid != data->currvid) ||
+           (checkfid != data->currfid)) {
+               pr_info(PFX
+                      "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+                      checkfid, data->currfid,
+                      checkvid, data->currvid);
        }
  
        if (cpufreq_frequency_table_target(pol, data->powernow_table,
                                targfreq, relation, &newstate))
 -              goto err_out;
 +              return -EIO;
  
        mutex_lock(&fidvid_mutex);
  
        powernow_k8_acpi_pst_values(data, newstate);
  
-       if (cpu_family == CPU_HW_PSTATE)
-               ret = transition_frequency_pstate(data,
-                       data->powernow_table[newstate].index);
-       else
-               ret = transition_frequency_fidvid(data, newstate);
+       ret = transition_frequency_fidvid(data, newstate);
        if (ret) {
                printk(KERN_ERR PFX "transition frequency failed\n");
 -              ret = 1;
                mutex_unlock(&fidvid_mutex);
 -              goto err_out;
 +              return 1;
        }
        mutex_unlock(&fidvid_mutex);
  
-       if (cpu_family == CPU_HW_PSTATE)
-               pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               data->powernow_table[newstate].index);
-       else
-               pol->cur = find_khz_freq_from_fid(data->currfid);
+       pol->cur = find_khz_freq_from_fid(data->currfid);
 -      ret = 0;
  
 -err_out:
 -      set_cpus_allowed_ptr(current, oldmask);
 -      free_cpumask_var(oldmask);
 -      return ret;
 +      return 0;
 +}
 +
 +/* Driver entry point to switch to the target frequency */
 +static int powernowk8_target(struct cpufreq_policy *pol,
 +              unsigned targfreq, unsigned relation)
 +{
 +      struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
 +                                           .relation = relation };
 +
 +      /*
 +       * Must run on @pol->cpu.  cpufreq core is responsible for ensuring
 +       * that we're bound to the current CPU and pol->cpu stays online.
 +       */
 +      if (smp_processor_id() == pol->cpu)
 +              return powernowk8_target_fn(&pta);
 +      else
 +              return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
  }
  
  /* Driver entry point to verify the policy and range of frequencies */
@@@ -1264,22 -1088,23 +1093,23 @@@ static void __cpuinit powernowk8_cpu_in
                return;
        }
  
-       if (cpu_family == CPU_OPTERON)
-               fidvid_msr_init();
+       fidvid_msr_init();
  
        init_on_cpu->rc = 0;
  }
  
+ static const char missing_pss_msg[] =
+       KERN_ERR
+       FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+       FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+       FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
  /* per CPU init entry point to the driver */
  static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
  {
-       static const char ACPI_PSS_BIOS_BUG_MSG[] =
-               KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-               FW_BUG PFX "Try again with latest BIOS.\n";
        struct powernow_k8_data *data;
        struct init_on_cpu init_on_cpu;
        int rc;
-       struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
  
        if (!cpu_online(pol->cpu))
                return -ENODEV;
        }
  
        data->cpu = pol->cpu;
-       data->currpstate = HW_PSTATE_INVALID;
  
        if (powernow_k8_cpu_init_acpi(data)) {
                /*
                 * an UP version, and is deprecated by AMD.
                 */
                if (num_online_cpus() != 1) {
-                       printk_once(ACPI_PSS_BIOS_BUG_MSG);
+                       printk_once(missing_pss_msg);
                        goto err_out;
                }
                if (pol->cpu != 0) {
        if (rc != 0)
                goto err_out_exit_acpi;
  
-       if (cpu_family == CPU_HW_PSTATE)
-               cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-       else
-               cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+       cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
        data->available_cores = pol->cpus;
  
-       if (cpu_family == CPU_HW_PSTATE)
-               pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               data->currpstate);
-       else
-               pol->cur = find_khz_freq_from_fid(data->currfid);
+       pol->cur = find_khz_freq_from_fid(data->currfid);
        pr_debug("policy current frequency %d kHz\n", pol->cur);
  
        /* min/max the cpu is capable of */
                return -EINVAL;
        }
  
-       /* Check for APERF/MPERF support in hardware */
-       if (cpu_has(c, X86_FEATURE_APERFMPERF))
-               cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
        cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
  
-       if (cpu_family == CPU_HW_PSTATE)
-               pr_debug("cpu_init done, current pstate 0x%x\n",
-                               data->currpstate);
-       else
-               pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-                       data->currfid, data->currvid);
+       pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
  
        per_cpu(powernow_data, pol->cpu) = data;
  
@@@ -1418,88 -1227,15 +1232,15 @@@ static unsigned int powernowk8_get(unsi
        if (err)
                goto out;
  
-       if (cpu_family == CPU_HW_PSTATE)
-               khz = find_khz_freq_from_pstate(data->powernow_table,
-                                               data->currpstate);
-       else
-               khz = find_khz_freq_from_fid(data->currfid);
+       khz = find_khz_freq_from_fid(data->currfid);
  
  
  out:
        return khz;
  }
  
- static void _cpb_toggle_msrs(bool t)
- {
-       int cpu;
-       get_online_cpus();
-       rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-       for_each_cpu(cpu, cpu_online_mask) {
-               struct msr *reg = per_cpu_ptr(msrs, cpu);
-               if (t)
-                       reg->l &= ~BIT(25);
-               else
-                       reg->l |= BIT(25);
-       }
-       wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-       put_online_cpus();
- }
- /*
-  * Switch on/off core performance boosting.
-  *
-  * 0=disable
-  * 1=enable.
-  */
- static void cpb_toggle(bool t)
- {
-       if (!cpb_capable)
-               return;
-       if (t && !cpb_enabled) {
-               cpb_enabled = true;
-               _cpb_toggle_msrs(t);
-               printk(KERN_INFO PFX "Core Boosting enabled.\n");
-       } else if (!t && cpb_enabled) {
-               cpb_enabled = false;
-               _cpb_toggle_msrs(t);
-               printk(KERN_INFO PFX "Core Boosting disabled.\n");
-       }
- }
- static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-                                size_t count)
- {
-       int ret = -EINVAL;
-       unsigned long val = 0;
-       ret = strict_strtoul(buf, 10, &val);
-       if (!ret && (val == 0 || val == 1) && cpb_capable)
-               cpb_toggle(val);
-       else
-               return -EINVAL;
-       return count;
- }
- static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
- {
-       return sprintf(buf, "%u\n", cpb_enabled);
- }
- #define define_one_rw(_name) \
- static struct freq_attr _name = \
- __ATTR(_name, 0644, show_##_name, store_##_name)
- define_one_rw(cpb);
  static struct freq_attr *powernow_k8_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
-       &cpb,
        NULL,
  };
  
@@@ -1515,53 -1251,18 +1256,18 @@@ static struct cpufreq_driver cpufreq_am
        .attr           = powernow_k8_attr,
  };
  
- /*
-  * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
-  * cannot block the remaining ones from boosting. On the CPU_UP path we
-  * simply keep the boost-disable flag in sync with the current global
-  * state.
-  */
- static int cpb_notify(struct notifier_block *nb, unsigned long action,
-                     void *hcpu)
- {
-       unsigned cpu = (long)hcpu;
-       u32 lo, hi;
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               if (!cpb_enabled) {
-                       rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-                       lo |= BIT(25);
-                       wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-               }
-               break;
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-               lo &= ~BIT(25);
-               wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
- }
- static struct notifier_block cpb_nb = {
-       .notifier_call          = cpb_notify,
- };
  /* driver entry point for init */
  static int __cpuinit powernowk8_init(void)
  {
-       unsigned int i, supported_cpus = 0, cpu;
+       unsigned int i, supported_cpus = 0;
        int rv;
  
+       if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+               pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+               request_module("acpi-cpufreq");
+               return -ENODEV;
+       }
        if (!x86_match_cpu(powernow_k8_ids))
                return -ENODEV;
  
        if (supported_cpus != num_online_cpus())
                return -ENODEV;
  
-       printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
-               num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
-       if (boot_cpu_has(X86_FEATURE_CPB)) {
-               cpb_capable = true;
-               msrs = msrs_alloc();
-               if (!msrs) {
-                       printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-                       return -ENOMEM;
-               }
-               register_cpu_notifier(&cpb_nb);
-               rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
  
-               for_each_cpu(cpu, cpu_online_mask) {
-                       struct msr *reg = per_cpu_ptr(msrs, cpu);
-                       cpb_enabled |= !(!!(reg->l & BIT(25)));
-               }
+       if (!rv)
+               pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+                       num_online_nodes(), boot_cpu_data.x86_model_id,
+                       supported_cpus);
  
-               printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
-                       (cpb_enabled ? "on" : "off"));
-       }
-       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
-       if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
-               unregister_cpu_notifier(&cpb_nb);
-               msrs_free(msrs);
-               msrs = NULL;
-       }
        return rv;
  }
  
@@@ -1615,13 -1291,6 +1296,6 @@@ static void __exit powernowk8_exit(void
  {
        pr_debug("exit\n");
  
-       if (boot_cpu_has(X86_FEATURE_CPB)) {
-               msrs_free(msrs);
-               msrs = NULL;
-               unregister_cpu_notifier(&cpb_nb);
-       }
        cpufreq_unregister_driver(&cpufreq_amd64_driver);
  }
  
diff --combined drivers/pci/pci-driver.c
index 9e1d2959e22682004c7f3f975c7991ad2b561060,0862b727d7c3689d2e796ac6cf4beb3aeb5add45..94c6e2aa03d658defb5d30b0f2fbb1f8383cf9d0
@@@ -139,6 -139,7 +139,6 @@@ store_new_id(struct device_driver *driv
                return retval;
        return count;
  }
 -static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
  
  /**
   * store_remove_id - remove a PCI device ID from this driver
@@@ -184,16 -185,38 +184,16 @@@ store_remove_id(struct device_driver *d
                return retval;
        return count;
  }
 -static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
  
 -static int
 -pci_create_newid_files(struct pci_driver *drv)
 -{
 -      int error = 0;
 +static struct driver_attribute pci_drv_attrs[] = {
 +      __ATTR(new_id, S_IWUSR, NULL, store_new_id),
 +      __ATTR(remove_id, S_IWUSR, NULL, store_remove_id),
 +      __ATTR_NULL,
 +};
  
 -      if (drv->probe != NULL) {
 -              error = driver_create_file(&drv->driver, &driver_attr_new_id);
 -              if (error == 0) {
 -                      error = driver_create_file(&drv->driver,
 -                                      &driver_attr_remove_id);
 -                      if (error)
 -                              driver_remove_file(&drv->driver,
 -                                              &driver_attr_new_id);
 -              }
 -      }
 -      return error;
 -}
 -
 -static void pci_remove_newid_files(struct pci_driver *drv)
 -{
 -      driver_remove_file(&drv->driver, &driver_attr_remove_id);
 -      driver_remove_file(&drv->driver, &driver_attr_new_id);
 -}
 -#else /* !CONFIG_HOTPLUG */
 -static inline int pci_create_newid_files(struct pci_driver *drv)
 -{
 -      return 0;
 -}
 -static inline void pci_remove_newid_files(struct pci_driver *drv) {}
 -#endif
 +#else
 +#define pci_drv_attrs NULL
 +#endif /* CONFIG_HOTPLUG */
  
  /**
   * pci_match_id - See if a pci device matches a given pci_id table
@@@ -606,21 -629,6 +606,6 @@@ static int pci_pm_prepare(struct devic
        struct device_driver *drv = dev->driver;
        int error = 0;
  
-       /*
-        * If a PCI device configured to wake up the system from sleep states
-        * has been suspended at run time and there's a resume request pending
-        * for it, this is equivalent to the device signaling wakeup, so the
-        * system suspend operation should be aborted.
-        */
-       pm_runtime_get_noresume(dev);
-       if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
-               pm_wakeup_event(dev, 0);
-       if (pm_wakeup_pending()) {
-               pm_runtime_put_sync(dev);
-               return -EBUSY;
-       }
        /*
         * PCI devices suspended at run time need to be resumed at this
         * point, because in general it is necessary to reconfigure them for
@@@ -644,8 -652,6 +629,6 @@@ static void pci_pm_complete(struct devi
  
        if (drv && drv->pm && drv->pm->complete)
                drv->pm->complete(dev);
-       pm_runtime_put_sync(dev);
  }
  
  #else /* !CONFIG_PM_SLEEP */
@@@ -1139,6 -1145,8 +1122,6 @@@ const struct dev_pm_ops pci_dev_pm_ops 
  int __pci_register_driver(struct pci_driver *drv, struct module *owner,
                          const char *mod_name)
  {
 -      int error;
 -
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &pci_bus_type;
        INIT_LIST_HEAD(&drv->dynids.list);
  
        /* register with core */
 -      error = driver_register(&drv->driver);
 -      if (error)
 -              goto out;
 -
 -      error = pci_create_newid_files(drv);
 -      if (error)
 -              goto out_newid;
 -out:
 -      return error;
 -
 -out_newid:
 -      driver_unregister(&drv->driver);
 -      goto out;
 +      return driver_register(&drv->driver);
  }
  
  /**
  void
  pci_unregister_driver(struct pci_driver *drv)
  {
 -      pci_remove_newid_files(drv);
        driver_unregister(&drv->driver);
        pci_free_dynids(drv);
  }
@@@ -1264,7 -1285,6 +1247,7 @@@ struct bus_type pci_bus_type = 
        .shutdown       = pci_device_shutdown,
        .dev_attrs      = pci_dev_attrs,
        .bus_attrs      = pci_bus_attrs,
 +      .drv_attrs      = pci_drv_attrs,
        .pm             = PCI_PM_OPS_PTR,
  };
  
diff --combined include/linux/device.h
index af92883bb4a6cf3ed01cc4389a54295cc6cc190d,86529e642d6c047ac2399ba0c21c69e229adf755..86ef6ab553b19305b5125881a05c23885e614c78
@@@ -536,10 -536,6 +536,10 @@@ extern void *__devres_alloc(dr_release_
  #else
  extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
  #endif
 +extern void devres_for_each_res(struct device *dev, dr_release_t release,
 +                              dr_match_t match, void *match_data,
 +                              void (*fn)(struct device *, void *, void *),
 +                              void *data);
  extern void devres_free(void *res);
  extern void devres_add(struct device *dev, void *res);
  extern void *devres_find(struct device *dev, dr_release_t release,
@@@ -776,6 -772,13 +776,13 @@@ static inline void pm_suspend_ignore_ch
        dev->power.ignore_children = enable;
  }
  
+ static inline void dev_pm_syscore_device(struct device *dev, bool val)
+ {
+ #ifdef CONFIG_PM_SLEEP
+       dev->power.syscore = val;
+ #endif
+ }
  static inline void device_lock(struct device *dev)
  {
        mutex_lock(&dev->mutex);
@@@ -895,15 -898,12 +902,15 @@@ extern const char *dev_driver_string(co
  
  #ifdef CONFIG_PRINTK
  
 -extern int __dev_printk(const char *level, const struct device *dev,
 -                      struct va_format *vaf);
 +extern __printf(3, 0)
 +int dev_vprintk_emit(int level, const struct device *dev,
 +                   const char *fmt, va_list args);
 +extern __printf(3, 4)
 +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
 +
  extern __printf(3, 4)
  int dev_printk(const char *level, const struct device *dev,
 -             const char *fmt, ...)
 -      ;
 +             const char *fmt, ...);
  extern __printf(2, 3)
  int dev_emerg(const struct device *dev, const char *fmt, ...);
  extern __printf(2, 3)
@@@ -921,14 -921,6 +928,14 @@@ int _dev_info(const struct device *dev
  
  #else
  
 +static inline __printf(3, 0)
 +int dev_vprintk_emit(int level, const struct device *dev,
 +                   const char *fmt, va_list args)
 +{ return 0; }
 +static inline __printf(3, 4)
 +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
 +{ return 0; }
 +
  static inline int __dev_printk(const char *level, const struct device *dev,
                               struct va_format *vaf)
  { return 0; }
@@@ -961,32 -953,6 +968,32 @@@ int _dev_info(const struct device *dev
  
  #endif
  
 +/*
 + * Stupid hackaround for existing uses of non-printk uses dev_info
 + *
 + * Note that the definition of dev_info below is actually _dev_info
 + * and a macro is used to avoid redefining dev_info
 + */
 +
 +#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 +
 +#if defined(CONFIG_DYNAMIC_DEBUG)
 +#define dev_dbg(dev, format, ...)                  \
 +do {                                               \
 +      dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 +} while (0)
 +#elif defined(DEBUG)
 +#define dev_dbg(dev, format, arg...)          \
 +      dev_printk(KERN_DEBUG, dev, format, ##arg)
 +#else
 +#define dev_dbg(dev, format, arg...)                          \
 +({                                                            \
 +      if (0)                                                  \
 +              dev_printk(KERN_DEBUG, dev, format, ##arg);     \
 +      0;                                                      \
 +})
 +#endif
 +
  #define dev_level_ratelimited(dev_level, dev, fmt, ...)                       \
  do {                                                                  \
        static DEFINE_RATELIMIT_STATE(_rs,                              \
        dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
  #define dev_info_ratelimited(dev, fmt, ...)                           \
        dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
 +#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
  #define dev_dbg_ratelimited(dev, fmt, ...)                            \
 -      dev_level_ratelimited(dev_dbg, dev, fmt, ##__VA_ARGS__)
 -
 -/*
 - * Stupid hackaround for existing uses of non-printk uses dev_info
 - *
 - * Note that the definition of dev_info below is actually _dev_info
 - * and a macro is used to avoid redefining dev_info
 - */
 -
 -#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
 -
 -#if defined(CONFIG_DYNAMIC_DEBUG)
 -#define dev_dbg(dev, format, ...)                  \
 -do {                                               \
 -      dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
 +do {                                                                  \
 +      static DEFINE_RATELIMIT_STATE(_rs,                              \
 +                                    DEFAULT_RATELIMIT_INTERVAL,       \
 +                                    DEFAULT_RATELIMIT_BURST);         \
 +      DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
 +      if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
 +          __ratelimit(&_rs))                                          \
 +              __dynamic_pr_debug(&descriptor, pr_fmt(fmt),            \
 +                                 ##__VA_ARGS__);                      \
  } while (0)
 -#elif defined(DEBUG)
 -#define dev_dbg(dev, format, arg...)          \
 -      dev_printk(KERN_DEBUG, dev, format, ##arg)
  #else
 -#define dev_dbg(dev, format, arg...)                          \
 -({                                                            \
 -      if (0)                                                  \
 -              dev_printk(KERN_DEBUG, dev, format, ##arg);     \
 -      0;                                                      \
 -})
 +#define dev_dbg_ratelimited(dev, fmt, ...)                    \
 +      no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
  #endif
  
  #ifdef VERBOSE_DEBUG
diff --combined include/linux/pm.h
index 88f034a23f2c6a04d56c9cc07743d1e7148824b5,44d1f2307dbc3a9d21ed0bb6a0e1948e91415b1a..007e687c4f6915866b9d551bbc37cd14747f28ac
@@@ -510,12 -510,14 +510,14 @@@ struct dev_pm_info 
        bool                    is_prepared:1;  /* Owned by the PM core */
        bool                    is_suspended:1; /* Ditto */
        bool                    ignore_children:1;
+       bool                    early_init:1;   /* Owned by the PM core */
        spinlock_t              lock;
  #ifdef CONFIG_PM_SLEEP
        struct list_head        entry;
        struct completion       completion;
        struct wakeup_source    *wakeup;
        bool                    wakeup_path:1;
+       bool                    syscore:1;
  #else
        unsigned int            should_wakeup:1;
  #endif
@@@ -638,7 -640,6 +640,7 @@@ extern void __suspend_report_result(con
        } while (0)
  
  extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
 +extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));
  
  extern int pm_generic_prepare(struct device *dev);
  extern int pm_generic_suspend_late(struct device *dev);
@@@ -678,10 -679,6 +680,10 @@@ static inline int device_pm_wait_for_de
        return 0;
  }
  
 +static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
 +{
 +}
 +
  #define pm_generic_prepare    NULL
  #define pm_generic_suspend    NULL
  #define pm_generic_resume     NULL
index d3b91e75cecd0df7895033aff87f588c6ba2d4df,312a675cb240e4d607bfb24081bcc17ed0308230..5ce06a3fa91e01af6a3cfc7fbbc828af2824fae8
@@@ -303,11 -303,10 +303,11 @@@ void getnstimeofday(struct timespec *ts
                seq = read_seqbegin(&tk->lock);
  
                ts->tv_sec = tk->xtime_sec;
 -              ts->tv_nsec = timekeeping_get_ns(tk);
 +              nsecs = timekeeping_get_ns(tk);
  
        } while (read_seqretry(&tk->lock, seq));
  
 +      ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);
  }
  EXPORT_SYMBOL(getnstimeofday);
@@@ -346,7 -345,6 +346,7 @@@ void ktime_get_ts(struct timespec *ts
  {
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono;
 +      s64 nsec;
        unsigned int seq;
  
        WARN_ON(timekeeping_suspended);
        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
 -              ts->tv_nsec = timekeeping_get_ns(tk);
 +              nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
  
        } while (read_seqretry(&tk->lock, seq));
  
 -      set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
 -                              ts->tv_nsec + tomono.tv_nsec);
 +      ts->tv_sec += tomono.tv_sec;
 +      ts->tv_nsec = 0;
 +      timespec_add_ns(ts, nsec + tomono.tv_nsec);
  }
  EXPORT_SYMBOL_GPL(ktime_get_ts);
  
@@@ -776,6 -773,7 +776,7 @@@ static void timekeeping_resume(void
  
        read_persistent_clock(&ts);
  
+       clockevents_resume();
        clocksource_resume();
  
        write_seqlock_irqsave(&tk->lock, flags);
@@@ -835,6 -833,7 +836,7 @@@ static int timekeeping_suspend(void
  
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
+       clockevents_suspend();
  
        return 0;
  }
@@@ -1247,7 -1246,6 +1249,7 @@@ void get_monotonic_boottime(struct time
  {
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono, sleep;
 +      s64 nsec;
        unsigned int seq;
  
        WARN_ON(timekeeping_suspended);
        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
 -              ts->tv_nsec = timekeeping_get_ns(tk);
 +              nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
                sleep = tk->total_sleep_time;
  
        } while (read_seqretry(&tk->lock, seq));
  
 -      set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
 -                      ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
 +      ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
 +      ts->tv_nsec = 0;
 +      timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
  }
  EXPORT_SYMBOL_GPL(get_monotonic_boottime);