FIXUP: sched/tune: fix accounting for runnable tasks
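
SchedTune keeps a per-CPU count of the runnable tasks accounted to each
boost group and uses those counts to pick the boost value that applies
to the CPU.  The count is only correct if schedtune_enqueue_task() and
schedtune_dequeue_task() are called for every enqueue/dequeue of a fair
task.  The "if (!se)" block, however, is reached only when the
enqueue/dequeue walked all the way up to the root cfs_rq; with task
groups (or a throttled cfs_rq) se can be non-NULL, in which case tasks
were added or removed without ever updating the SchedTune counters.
Move both calls out of the conditional so the accounting is updated
unconditionally on the enqueue and dequeue paths.

Below is a minimal sketch of the kind of per-CPU accounting these hooks
perform (st_runnable, st_account_enqueue() and st_account_dequeue() are
hypothetical names used for illustration, not the actual SchedTune
code).  The counter stays balanced only if the two hooks are called
symmetrically for every task, which is what this patch guarantees:

    #include <linux/percpu.h>
    #include <linux/bug.h>

    /* Hypothetical per-CPU runnable-task counter. */
    static DEFINE_PER_CPU(int, st_runnable);

    /* Called from enqueue_task_fair() with the rq lock held. */
    static void st_account_enqueue(int cpu)
    {
            per_cpu(st_runnable, cpu)++;
    }

    /* Called from dequeue_task_fair() with the rq lock held. */
    static void st_account_dequeue(int cpu)
    {
            per_cpu(st_runnable, cpu)--;
            /* A negative count means an enqueue was never accounted. */
            WARN_ON_ONCE(per_cpu(st_runnable, cpu) < 0);
    }
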
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9c717c3be75df324761fd473b7fbe920113e5c40..736adab1a503fb2c7988b8f10e492260eeb394b9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4250,8 +4250,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                    cpu_overutilized(rq->cpu))
                        rq->rd->overutilized = true;
 
-               schedtune_enqueue_task(p, cpu_of(rq));
-
                /*
                 * We want to potentially trigger a freq switch
                 * request only for tasks that are waking up; this is
@@ -4262,6 +4260,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                if (task_new || task_wakeup)
                        update_capacity_of(cpu_of(rq));
        }
+
+       /* Update SchedTune accounting */
+       schedtune_enqueue_task(p, cpu_of(rq));
+
 #endif /* CONFIG_SMP */
 
        hrtick_update(rq);
@@ -4327,7 +4329,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 #ifdef CONFIG_SMP
 
        if (!se) {
-               schedtune_dequeue_task(p, cpu_of(rq));
 
                /*
                 * We want to potentially trigger a freq switch
@@ -4345,6 +4346,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                }
        }
 
+       /* Update SchedTune accounting */
+       schedtune_dequeue_task(p, cpu_of(rq));
+
 #endif /* CONFIG_SMP */
 
        hrtick_update(rq);
@@ -5625,7 +5629,6 @@ static inline int find_best_target(struct task_struct *p)
                 * The target CPU can be already at a capacity level higher
                 * than the one required to boost the task.
                 */
-
                if (new_util > capacity_orig_of(i))
                        continue;