X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fcpufreq%2Fcpufreq.c;h=aea1999b56ae4391fd5240f684ed5fcdc677b0d2;hb=8acfe32cb8716b76814e9a7c964354b91e5ce886;hp=6b53d05f74477c004ae41ffe500d854fda1f9e07;hpb=c23fe50e76a70704141d0c2d86afddaeb16993bd;p=firefly-linux-kernel-4.4.55.git

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6b53d05f7447..aea1999b56ae 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -34,6 +34,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
@@ -49,6 +51,15 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 static DEFINE_RWLOCK(cpufreq_driver_lock);
+static DEFINE_MUTEX(cpufreq_governor_lock);
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
+
+static inline bool has_target(void)
+{
+	return cpufreq_driver->target;
+}
 
 /*
  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -1322,82 +1333,89 @@ static struct subsys_interface cpufreq_interface = {
 
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor. The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system-wide suspend/hibernate cycles for suspending governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle, because some of the devices (i2c, regulators, etc.) used for changing
+ * the frequency are suspended quickly after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-	int ret = 0;
+	struct cpufreq_policy *policy;
+	int cpu;
 
-	int cpu = smp_processor_id();
-	struct cpufreq_policy *cpu_policy;
+	if (!cpufreq_driver)
+		return;
 
-	pr_debug("suspending cpu %u\n", cpu);
+	if (!has_target())
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	cpu_policy = cpufreq_cpu_get(cpu);
-	if (!cpu_policy)
-		return 0;
+	pr_debug("%s: Suspending Governors\n", __func__);
 
-	if (cpufreq_driver->suspend) {
-		ret = cpufreq_driver->suspend(cpu_policy);
-		if (ret)
-			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-					"step on CPU %u\n", cpu_policy->cpu);
+	for_each_possible_cpu(cpu) {
+		if (!cpu_online(cpu))
+			continue;
+
+		policy = cpufreq_cpu_get(cpu);
+
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->suspend
+		    && cpufreq_driver->suspend(policy))
+			pr_err("%s: Failed to suspend driver: %p\n", __func__,
+				policy);
 	}
 
-	cpufreq_cpu_put(cpu_policy);
-	return ret;
+	cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
- *
- *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored. It will verify that the current freq is in sync with
- *	    what we believe it to be. This is a bit later than when it
- *	    should be, but nonethteless it's better than calling
- *	    cpufreq_driver->get() here which might re-enable interrupts...
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- * This function is only executed for the boot CPU. The other CPUs have not
- * been turned on yet.
+ * Called during system-wide suspend/hibernate cycles for resuming governors
+ * that were suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
-	struct cpufreq_policy *cpu_policy;
+	struct cpufreq_policy *policy;
+	int cpu;
 
-	pr_debug("resuming cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	cpu_policy = cpufreq_cpu_get(cpu);
-	if (!cpu_policy)
+	if (!has_target())
 		return;
 
-	if (cpufreq_driver->resume) {
-		ret = cpufreq_driver->resume(cpu_policy);
-		if (ret) {
-			printk(KERN_ERR "cpufreq: resume failed in ->resume "
-					"step on CPU %u\n", cpu_policy->cpu);
-			goto fail;
-		}
-	}
+	pr_debug("%s: Resuming Governors\n", __func__);
 
-	schedule_work(&cpu_policy->update);
+	cpufreq_suspended = false;
 
-fail:
-	cpufreq_cpu_put(cpu_policy);
-}
+	for_each_possible_cpu(cpu) {
+		if (!cpu_online(cpu))
+			continue;
 
-static struct syscore_ops cpufreq_syscore_ops = {
-	.suspend	= cpufreq_bp_suspend,
-	.resume		= cpufreq_bp_resume,
-};
+		policy = cpufreq_cpu_get(cpu);
+
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->resume
+		    && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
+
+		/*
+		 * Schedule a call to cpufreq_update_policy() for the boot CPU,
+		 * i.e. the last policy in the list. It will verify that the
+		 * current freq is in sync with what we believe it to be.
+		 */
+		if (cpu == 0)
+			schedule_work(&policy->update);
+	}
+}
 
 /**
  * cpufreq_get_current_driver - return current driver's name
@@ -1591,6 +1609,10 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif
 
+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
@@ -1611,6 +1633,21 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
 						policy->cpu, event);
 
+
+	mutex_lock(&cpufreq_governor_lock);
+	if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
+	    (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
+		mutex_unlock(&cpufreq_governor_lock);
+		return -EBUSY;
+	}
+
+	if (event == CPUFREQ_GOV_STOP)
+		policy->governor_enabled = false;
+	else if (event == CPUFREQ_GOV_START)
+		policy->governor_enabled = true;
+
+	mutex_unlock(&cpufreq_governor_lock);
+
 	ret = policy->governor->governor(policy, event);
 
 	if (!ret) {
@@ -1618,6 +1655,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 			policy->governor->initialized++;
 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
 			policy->governor->initialized--;
+	} else {
+		/* Restore original values */
+		mutex_lock(&cpufreq_governor_lock);
+		if (event == CPUFREQ_GOV_STOP)
+			policy->governor_enabled = true;
+		else if (event == CPUFREQ_GOV_START)
+			policy->governor_enabled = false;
+		mutex_unlock(&cpufreq_governor_lock);
 	}
 
 	/* we keep one module reference alive for
@@ -2025,7 +2070,6 @@ static int __init cpufreq_core_init(void)
 	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
 						&cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
-	register_syscore_ops(&cpufreq_syscore_ops);
 
 	return 0;
 }
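
For context on what the new hooks expect from individual cpufreq drivers: cpufreq_suspend() stops each online policy's governor and only then calls the driver's optional ->suspend() callback, cpufreq_resume() restarts the governor around the driver's ->resume() callback, and the cpufreq_suspended flag makes __cpufreq_governor() a no-op in between. Below is a minimal sketch of a driver supplying these callbacks; the driver and function names (foo_cpufreq_*) are invented for illustration, and only the callback signatures mirror what the code above actually invokes.

/* Hypothetical example, not part of the patch above. */
#include <linux/cpufreq.h>

/* Quiesce the clock hardware before i2c/regulators are suspended. */
static int foo_cpufreq_suspend(struct cpufreq_policy *policy)
{
	/* e.g. park the CPU PLL at a safe boot frequency */
	return 0;
}

/* Re-program the clock tree; the governor is restarted by cpufreq_resume(). */
static int foo_cpufreq_resume(struct cpufreq_policy *policy)
{
	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	/* .init, .verify, .target and .get omitted for brevity */
	.suspend	= foo_cpufreq_suspend,
	.resume		= foo_cpufreq_resume,
};

Compared with the removed syscore_ops path, which only touched the boot CPU's policy after the other CPUs had gone offline, these hooks run for every online policy and stop the governors before the driver callback, so no governor can request a frequency change once the supporting devices are suspended.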