sched/tune: Introducing a new schedtune attribute prefer_idle
[firefly-linux-kernel-4.4.55.git] kernel/sched/fair.c
index 26c7e3fd332f7b8dfa0aad28c5c43595b8fa1f1b..781e7676df8955eb9dc9070645d10b7c0feb9a57 100644
@@ -5610,7 +5610,7 @@ done:
        return target;
 }
 
-static inline int find_best_target(struct task_struct *p, bool boosted)
+static inline int find_best_target(struct task_struct *p, bool prefer_idle)
 {
        int iter_cpu;
        int target_cpu = -1;
@@ -5628,9 +5628,9 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
                int idle_idx;
 
                /*
-                * favor higher cpus for boosted tasks
+                * favor higher cpus for tasks that prefer idle cores
                 */
-               int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
+               int i = prefer_idle ? NR_CPUS-iter_cpu-1 : iter_cpu;
 
                if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
                        continue;
@@ -5655,10 +5655,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
                        continue;
 #endif
                /*
-                * For boosted tasks we favor idle cpus unconditionally to
+                * Unconditionally favor idle cpus for tasks that prefer them, to
                 * improve latency.
                 */
-               if (idle_cpu(i) && boosted) {
+               if (idle_cpu(i) && prefer_idle) {
                        if (best_idle_cpu < 0)
                                best_idle_cpu = i;
                        continue;
@@ -5675,7 +5675,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
                                        target_cpu = i;
                                        target_util = new_util;
                                }
-                       } else if (!boosted) {
+                       } else if (!prefer_idle) {
                                if (best_idle_cpu < 0 ||
                                        (sysctl_sched_cstate_aware &&
                                                best_idle_cstate > idle_idx)) {
@@ -5690,7 +5690,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
                }
        }
 
-       if (boosted && best_idle_cpu >= 0)
+       if (prefer_idle && best_idle_cpu >= 0)
                target_cpu = best_idle_cpu;
        else if (target_cpu < 0)
                target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
@@ -5782,14 +5782,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
                 */
 #ifdef CONFIG_CGROUP_SCHEDTUNE
                bool boosted = schedtune_task_boost(p) > 0;
+               bool prefer_idle = schedtune_prefer_idle(p) > 0;
 #else
                bool boosted = 0;
+               bool prefer_idle = 0;
 #endif
-               int tmp_target = find_best_target(p, boosted);
-               if (tmp_target >= 0)
+               int tmp_target = find_best_target(p, boosted || prefer_idle);
+               if (tmp_target >= 0) {
                        target_cpu = tmp_target;
-                       if (boosted && idle_cpu(target_cpu))
+                       if ((boosted || prefer_idle) && idle_cpu(target_cpu))
                                return target_cpu;
+               }
        }
 
        if (target_cpu != task_cpu(p)) {
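
Note: the hunks above cover only the fair.c consumer; the schedtune_prefer_idle() accessor and the cgroup attribute it reads live in kernel/sched/tune.c, which this view does not show. The following is a minimal sketch of what that side could look like, modeled on the existing schedtune boost plumbing; the prefer_idle field, the task_schedtune() and css_st() helpers, and the cftype table are assumed to follow the same pattern already used for schedtune.boost.

/* kernel/sched/tune.c (sketch, not part of this hunk) */

static u64
prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->prefer_idle;
}

static int
prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
		  u64 prefer_idle)
{
	struct schedtune *st = css_st(css);

	st->prefer_idle = prefer_idle;
	return 0;
}

int schedtune_prefer_idle(struct task_struct *p)
{
	struct schedtune *st;
	int prefer_idle;

	/* The task's schedtune group can change under us; read under RCU. */
	rcu_read_lock();
	st = task_schedtune(p);
	prefer_idle = st->prefer_idle;
	rcu_read_unlock();

	return prefer_idle;
}

/* Exposed to userspace alongside the existing boost attribute: */
	{
		.name = "prefer_idle",
		.read_u64 = prefer_idle_read,
		.write_u64 = prefer_idle_write,
	},

With the controller mounted (on Android systems typically at /dev/stune), writing 1 to a group's schedtune.prefer_idle file would make wake-ups for that group's tasks take the idle-cpu fast path in find_best_target() above, without also applying a utilization boost.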