sched/fair: Simplify idle_idx handling in select_idle_sibling()
author Dietmar Eggemann <dietmar.eggemann@arm.com>
Mon, 16 Jan 2017 12:42:59 +0000 (12:42 +0000)
committer Amit Pundir <amit.pundir@linaro.org>
Wed, 21 Jun 2017 11:07:32 +0000 (16:37 +0530)
Rename best_idle to best_idle_cpu so the same name is used as in
find_best_target().

Fix the 'if (best_idle > 0)' check: CPU 0 is a valid target, so test
'best_idle_cpu >= 0' instead.

Use the 'unsigned long' data type for best_idle_capacity and initialize
it to ULONG_MAX.

Since we're looking for the shallowest best_idle_cstate, initialize
best_idle_cstate = INT_MAX. CPUs which are not idle (idle_idx == -1) are
skipped via the '!idle_cpu(i)' bail-out before the
'if (idle_idx < best_idle_cstate && ...)' condition is ever reached, so
the comparison only sees idle CPUs and the old 'best_idle < 0' seed
check becomes unnecessary.
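
For illustration only, a minimal user-space sketch of the same selection
pattern. The array contents and names (cpu_idle_idx, best_cpu,
best_cstate) are made up for this example and are not kernel code:

#include <limits.h>
#include <stdio.h>

/*
 * Sketch of the pattern described above: seed best_cstate with INT_MAX
 * so the first idle CPU always wins the 'idle_idx < best_cstate'
 * comparison, removing the need for a separate "no best CPU yet" check.
 * -1 means the CPU is busy; 0 is the shallowest idle state.
 */
static const int cpu_idle_idx[] = { -1, 2, 0, 1 }; /* hypothetical per-CPU idle state */

int main(void)
{
	int best_cpu = -1;
	int best_cstate = INT_MAX;
	int i;

	for (i = 0; i < 4; i++) {
		if (cpu_idle_idx[i] < 0)	/* busy CPU, like the !idle_cpu(i) bail-out */
			continue;

		if (cpu_idle_idx[i] < best_cstate) {
			best_cpu = i;
			best_cstate = cpu_idle_idx[i];
		}
	}

	/* '>= 0' rather than '> 0': CPU 0 is a valid result. */
	if (best_cpu >= 0)
		printf("shallowest idle CPU: %d (state %d)\n", best_cpu, best_cstate);

	return 0;
}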

Change-Id: Ic5b63d58478696b3d1ec6253cf739a69a574cf99
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
(cherry picked from commit 8bff5e9c0968108d465e1f2a4624fc5ec2f00849)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
kernel/sched/fair.c

index 270091323ae8af6cb543a3b6610bb9bcf215a923..e77917b23c79a3462322cae526babfe70015b297 100644 (file)
@@ -5677,9 +5677,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
        struct sched_domain *sd;
        struct sched_group *sg;
-       int best_idle = -1;
-       int best_idle_cstate = -1;
-       int best_idle_capacity = INT_MAX;
+       int best_idle_cpu = -1;
+       int best_idle_cstate = INT_MAX;
+       unsigned long best_idle_capacity = ULONG_MAX;
 
        if (!sysctl_sched_cstate_aware) {
                if (idle_cpu(target))
@@ -5706,18 +5706,19 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 
                        if (sysctl_sched_cstate_aware) {
                                for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
-                                       struct rq *rq = cpu_rq(i);
-                                       int idle_idx = idle_get_state_idx(rq);
+                                       int idle_idx = idle_get_state_idx(cpu_rq(i));
                                        unsigned long new_usage = boosted_task_util(p);
                                        unsigned long capacity_orig = capacity_orig_of(i);
+
                                        if (new_usage > capacity_orig || !idle_cpu(i))
                                                goto next;
 
                                        if (i == target && new_usage <= capacity_curr_of(target))
                                                return target;
 
-                                       if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
-                                               best_idle = i;
+                                       if (idle_idx < best_idle_cstate &&
+                                           capacity_orig <= best_idle_capacity) {
+                                               best_idle_cpu = i;
                                                best_idle_cstate = idle_idx;
                                                best_idle_capacity = capacity_orig;
                                        }
@@ -5736,8 +5737,9 @@ next:
                        sg = sg->next;
                } while (sg != sd->groups);
        }
-       if (best_idle > 0)
-               target = best_idle;
+
+       if (best_idle_cpu >= 0)
+               target = best_idle_cpu;
 
 done:
        return target;