sched/cpufreq_sched: fix thermal capping events
author Juri Lelli <juri.lelli@arm.com>
Thu, 4 Aug 2016 11:20:04 +0000 (12:20 +0100)
committer Amit Pundir <amit.pundir@linaro.org>
Wed, 14 Sep 2016 09:32:22 +0000 (15:02 +0530)
cpufreq_sched_limits (called when CPUFREQ_GOV_LIMITS event happens)
bails out if policy->rwsem is already locked. However, that rwsem is
always guaranteed to be locked when we get here after a thermal
throttling event happens:

 th_throttling ->
   cpufreq_update_policy()
     ...
     down_write(&policy->rwsem);
     ...
     cpufreq_set_policy() ->
       ...
       __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); ->
         cpufreq_sched_limits()
         ...
         if (!down_write_trylock(&policy->rwsem))
                 return; <-- BAIL OUT!

So, we don't currently react immediately to thermal capping event (even
if reaction is still quick in practice, ~1ms, as lots of events are likely
to trigger a frequency selection on a high loaded system).

Fix this bug by removing the bail out condition.

While we are at it we also slightly change handling of the new limits by
clamping the last requested_freq between policy's max and min. Doing so
gives us the opportunity to correctly restore the last requested
frequency as soon as a thermal unthrottling event happens.

bug: 30481949

Change-Id: I3c13e818f238c1ffa66b34e419e8b87314b57427
Suggested-by: Javi Merino <javi.merino@arm.com>
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Srinath Sridharan <srinathsr@google.com>
[jstultz: fwdported to 4.4]
Signed-off-by: John Stultz <john.stultz@linaro.org>
kernel/sched/cpufreq_sched.c

index 3f8c67a3ea0ff814c328f67a7de0c9618b373bda..4fea269a6598c971de4eef5fdd6582581cde6f49 100644 (file)
@@ -58,7 +58,6 @@ struct gov_data {
        struct task_struct *task;
        struct irq_work irq_work;
        unsigned int requested_freq;
-       int max;
 };
 
 static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
@@ -193,7 +192,7 @@ static void update_fdomain_capacity_request(int cpu)
        }
 
        /* Convert the new maximum capacity request into a cpu frequency */
-       freq_new = capacity * gd->max >> SCHED_CAPACITY_SHIFT;
+       freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
        if (cpufreq_frequency_table_target(policy, policy->freq_table,
                                           freq_new, CPUFREQ_RELATION_L,
                                           &index_new))
@@ -288,8 +287,6 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
        pr_debug("%s: throttle threshold = %u [ns]\n",
                  __func__, gd->up_throttle_nsec);
 
-       gd->max = policy->max;
-
        rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr());
        if (rc) {
                pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
@@ -352,28 +349,17 @@ static int cpufreq_sched_start(struct cpufreq_policy *policy)
 
 static void cpufreq_sched_limits(struct cpufreq_policy *policy)
 {
-       struct gov_data *gd;
+       unsigned int clamp_freq;
+       struct gov_data *gd = policy->governor_data;
 
        pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
                policy->cpu, policy->min, policy->max,
                policy->cur);
 
-       if (!down_write_trylock(&policy->rwsem))
-               return;
-       /*
-        * Need to keep track of highest max frequency for
-        * capacity calculations
-        */
-       gd = policy->governor_data;
-       if (gd->max < policy->max)
-               gd->max = policy->max;
+       clamp_freq = clamp(gd->requested_freq, policy->min, policy->max);
 
-       if (policy->max < policy->cur)
-               __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
-       else if (policy->min > policy->cur)
-               __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
-
-       up_write(&policy->rwsem);
+       if (policy->cur != clamp_freq)
+               __cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L);
 }
 
 static int cpufreq_sched_stop(struct cpufreq_policy *policy)