#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>
-#include <linux/suspend.h>
-#include <linux/tick.h>
#include <trace/events/power.h>
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);
-/* Flag to suspend/resume CPUFreq governors */
-static bool cpufreq_suspended;
-
-static inline bool has_target(void)
-{
- return cpufreq_driver->target;
-}
-
/*
* cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
* all cpufreq/hotplug/workqueue/etc related lock issues.
/**
- * cpufreq_suspend() - Suspend CPUFreq governors
+ * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
*
- * Called during system wide Suspend/Hibernate cycles for suspending governors
- * as some platforms can't change frequency after this point in suspend cycle.
- * Because some of the devices (like: i2c, regulators, etc) they use for
- * changing frequency are suspended quickly after this point.
+ * This function is only executed for the boot processor. The other CPUs
+ * have been put offline by means of CPU hotplug.
*/
-void cpufreq_suspend(void)
+static int cpufreq_bp_suspend(void)
{
- struct cpufreq_policy *policy;
- int cpu;
-
- if (!cpufreq_driver)
- return;
-
- if (!has_target())
- return;
+ int ret = 0;
- pr_debug("%s: Suspending Governors\n", __func__);
+ int cpu = smp_processor_id();
+ struct cpufreq_policy *cpu_policy;
- for_each_possible_cpu(cpu) {
- if (!cpu_online(cpu))
- continue;
+ pr_debug("suspending cpu %u\n", cpu);
- policy = cpufreq_cpu_get(cpu);
+ /* If there's no policy for the boot CPU, we have nothing to do. */
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ return 0;
- if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
- pr_err("%s: Failed to stop governor for policy: %p\n",
- __func__, policy);
- else if (cpufreq_driver->suspend
- && cpufreq_driver->suspend(policy))
- pr_err("%s: Failed to suspend driver: %p\n", __func__,
- policy);
+ if (cpufreq_driver->suspend) {
+ ret = cpufreq_driver->suspend(cpu_policy);
+ if (ret)
+ printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
+ "step on CPU %u\n", cpu_policy->cpu);
}
- cpufreq_suspended = true;
+ cpufreq_cpu_put(cpu_policy);
+ return ret;
}
/**
- * cpufreq_resume() - Resume CPUFreq governors
+ * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
*
- * Called during system wide Suspend/Hibernate cycle for resuming governors that
- * are suspended with cpufreq_suspend().
+ * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
+ * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
+ * restored. It will verify that the current freq is in sync with
+ * what we believe it to be. This is a bit later than when it
+ * should be, but nonetheless it's better than calling
+ * cpufreq_driver->get() here which might re-enable interrupts...
+ *
+ * This function is only executed for the boot CPU. The other CPUs have not
+ * been turned on yet.
*/
-void cpufreq_resume(void)
+static void cpufreq_bp_resume(void)
{
- struct cpufreq_policy *policy;
- int cpu;
+ int ret = 0;
- if (!cpufreq_driver)
- return;
+ int cpu = smp_processor_id();
+ struct cpufreq_policy *cpu_policy;
+
+ pr_debug("resuming cpu %u\n", cpu);
- if (!has_target())
+ /* If there's no policy for the boot CPU, we have nothing to do. */
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
return;
- pr_debug("%s: Resuming Governors\n", __func__);
+ if (cpufreq_driver->resume) {
+ ret = cpufreq_driver->resume(cpu_policy);
+ if (ret) {
+ printk(KERN_ERR "cpufreq: resume failed in ->resume "
+ "step on CPU %u\n", cpu_policy->cpu);
+ goto fail;
+ }
+ }
- cpufreq_suspended = false;
+ schedule_work(&cpu_policy->update);
- for_each_possible_cpu(cpu) {
- if (!cpu_online(cpu))
- continue;
-
- policy = cpufreq_cpu_get(cpu);
-
- if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
- || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
- pr_err("%s: Failed to start governor for policy: %p\n",
- __func__, policy);
- else if (cpufreq_driver->resume
- && cpufreq_driver->resume(policy))
- pr_err("%s: Failed to resume driver: %p\n", __func__,
- policy);
-
- /*
- * schedule call cpufreq_update_policy() for boot CPU, i.e. last
- * policy in list. It will verify that the current freq is in
- * sync with what we believe it to be.
- */
- if (cpu == 0)
- schedule_work(&policy->update);
- }
+fail:
+ cpufreq_cpu_put(cpu_policy);
}
+static struct syscore_ops cpufreq_syscore_ops = {
+ .suspend = cpufreq_bp_suspend,
+ .resume = cpufreq_bp_resume,
+};
+
/**
* cpufreq_get_current_driver - return current driver's name
*
struct cpufreq_governor *gov = NULL;
#endif
- /* Don't start any governor operations if we are entering suspend */
- if (cpufreq_suspended)
- return 0;
-
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
+ register_syscore_ops(&cpufreq_syscore_ops);
return 0;
}