sched/fair: optimize idle cpu selection for boosted tasks
author    Todd Kjos <tkjos@google.com>    Fri, 29 Jul 2016 13:41:25 +0000 (14:41 +0100)
committer Amit Pundir <amit.pundir@linaro.org>    Wed, 14 Sep 2016 09:29:32 +0000 (14:59 +0530)
find_best_target() CPU selection is biased towards lower CPU IDs. Bias
it towards higher CPUs for boosted tasks. For boosted tasks,
unconditionally use the idle CPU returned by find_best_target().

BUG: 29512132
Change-Id: I3d650051752163fcf3dc7909751d1fde3f9d17c0

Conflicts:
kernel/sched/fair.c
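
In effect, the patch replaces the for_each_cpu() walk with an
index-reversing loop, so a boosted task scans CPUs from the highest ID
downwards. A minimal standalone sketch of just that visit order (the
NR_CPUS value and the visit_cpus() helper are assumptions for
illustration, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8	/* assumed here; a kernel config value in reality */

/* Print the CPU visit order used by the patched find_best_target() loop. */
static void visit_cpus(bool boosted)
{
	int iter_cpu;

	for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
		/* favor higher cpus for boosted tasks */
		int i = boosted ? NR_CPUS - iter_cpu - 1 : iter_cpu;

		printf("%d ", i);	/* stands in for the idle/capacity checks */
	}
	printf("\n");
}

int main(void)
{
	visit_cpus(false);	/* prints: 0 1 2 3 4 5 6 7 */
	visit_cpus(true);	/* prints: 7 6 5 4 3 2 1 0 */
	return 0;
}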

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 736adab1a503fb2c7988b8f10e492260eeb394b9..84f5e12c8e123e1973f1bb23cd1242aa7a0e877e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5590,32 +5590,30 @@ done:
        return target;
 }
 
-static inline int find_best_target(struct task_struct *p)
+static inline int find_best_target(struct task_struct *p, bool boosted)
 {
-       int i, boosted;
+       int iter_cpu;
        int target_cpu = -1;
        int target_capacity = 0;
        int backup_capacity = 0;
-       int idle_cpu = -1;
+       int best_idle_cpu = -1;
        int best_idle_cstate = INT_MAX;
        int backup_cpu = -1;
        unsigned long task_util_boosted, new_util;
 
-       /*
-        * Favor 1) busy cpu with most capacity at current OPP
-        *       2) idle_cpu with capacity at current OPP
-        *       3) busy cpu with capacity at higher OPP
-        */
-#ifdef CONFIG_CGROUP_SCHEDTUNE
-       boosted = schedtune_task_boost(p);
-#else
-       boosted = 0;
-#endif
        task_util_boosted = boosted_task_util(p);
-       for_each_cpu(i, tsk_cpus_allowed(p)) {
-               int cur_capacity = capacity_curr_of(i);
-               struct rq *rq = cpu_rq(i);
-               int idle_idx = idle_get_state_idx(rq);
+       for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
+               int cur_capacity;
+               struct rq *rq;
+               int idle_idx;
+
+               /*
+                * Favor higher CPUs for boosted tasks.
+                */
+               int i = boosted ? NR_CPUS - iter_cpu - 1 : iter_cpu;
+
+               if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
+                       continue;
 
                /*
                 * p's blocked utilization is still accounted for on prev_cpu
@@ -5636,46 +5634,43 @@ static inline int find_best_target(struct task_struct *p)
                 * For boosted tasks we favor idle cpus unconditionally to
                 * improve latency.
                 */
-               if (idle_idx >= 0 && boosted) {
-                       if (idle_cpu < 0 ||
-                               (sysctl_sched_cstate_aware &&
-                                best_idle_cstate > idle_idx)) {
-                               best_idle_cstate = idle_idx;
-                               idle_cpu = i;
-                       }
+               if (idle_cpu(i) && boosted) {
+                       if (best_idle_cpu < 0)
+                               best_idle_cpu = i;
                        continue;
                }
 
+               cur_capacity = capacity_curr_of(i);
+               rq = cpu_rq(i);
+               idle_idx = idle_get_state_idx(rq);
+
                if (new_util < cur_capacity) {
                        if (cpu_rq(i)->nr_running) {
                                if (target_capacity == 0 ||
                                        target_capacity > cur_capacity) {
-                                       /* busy CPU with most capacity at current OPP */
                                        target_cpu = i;
                                        target_capacity = cur_capacity;
                                }
                        } else if (!boosted) {
-                               if (idle_cpu < 0 ||
+                               if (best_idle_cpu < 0 ||
                                        (sysctl_sched_cstate_aware &&
                                                best_idle_cstate > idle_idx)) {
                                        best_idle_cstate = idle_idx;
-                                       idle_cpu = i;
+                                       best_idle_cpu = i;
                                }
                        }
                } else if (backup_capacity == 0 ||
                                backup_capacity > cur_capacity) {
-                       /* first busy CPU with capacity at higher OPP */
                        backup_capacity = cur_capacity;
                        backup_cpu = i;
                }
        }
 
-       if (!boosted && target_cpu < 0) {
-               target_cpu = idle_cpu >= 0 ? idle_cpu : backup_cpu;
-       }
+       if (boosted && best_idle_cpu >= 0)
+               target_cpu = best_idle_cpu;
+       else if (target_cpu < 0)
+               target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
 
-       if (boosted && idle_cpu >= 0)
-               target_cpu = idle_cpu;
        return target_cpu;
 }
 
@@ -5761,9 +5756,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
                /*
                 * Find a cpu with sufficient capacity
                 */
-               int tmp_target = find_best_target(p);
-               if (tmp_target >= 0)
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+               bool boosted = schedtune_task_boost(p) > 0;
+#else
+               bool boosted = false;
+#endif
+               int tmp_target = find_best_target(p, boosted);
+               if (tmp_target >= 0) {
                        target_cpu = tmp_target;
+                       if (boosted && idle_cpu(target_cpu))
+                               return target_cpu;
+               }
        }
 
        if (target_cpu != task_cpu(p)) {
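
After the scan loop, the patched find_best_target() resolves its
candidates in a fixed priority order. Condensed into a standalone
sketch (pick_target() is a hypothetical helper, not in the patch; -1
means "no candidate found"):

/*
 * Post-patch selection priority:
 * 1) boosted task: the idle CPU found first (highest IDs scanned first)
 * 2) busy CPU with spare capacity at its current OPP
 * 3) best idle CPU tracked for non-boosted tasks
 * 4) backup CPU that only fits at a higher OPP
 */
static int pick_target(bool boosted, int best_idle_cpu, int target_cpu,
		       int backup_cpu)
{
	if (boosted && best_idle_cpu >= 0)
		return best_idle_cpu;
	if (target_cpu >= 0)
		return target_cpu;
	return best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
}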