sched/walt: Accounting for number of irqs pending on each core
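
The first hunk below introduces a WALT tunable, sysctl_sched_walt_cpu_high_irqload, defaulting to 10 ms of irq time expressed in nanoseconds. This diff only touches kernel/sched/fair.c; how the knob gets exposed through procfs is not shown here. As a rough, hypothetical sketch only (the actual registration, normally a ctl_table entry in kernel/sysctl.c, may differ), a sysctl_* variable like this is typically wired up along these lines:

	/* Hypothetical sketch: not this tree's actual sysctl registration. */
	#include <linux/sysctl.h>

	extern unsigned int sysctl_sched_walt_cpu_high_irqload;

	static struct ctl_table sched_walt_table[] = {
		{
			.procname	= "sched_walt_cpu_high_irqload",
			.data		= &sysctl_sched_walt_cpu_high_irqload,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* sentinel */
	};

With an entry like this in the kernel sysctl table, the threshold would be readable and writable at /proc/sys/kernel/sched_walt_cpu_high_irqload.
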
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 15b8a8f34bd9a31aa892706fa46c7059b545a7d7..8560a553003543bc614882df258e71768fccb0ad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -61,6 +61,8 @@ unsigned int sysctl_sched_cstate_aware = 1;
 #ifdef CONFIG_SCHED_WALT
 unsigned int sysctl_sched_use_walt_cpu_util = 1;
 unsigned int sysctl_sched_use_walt_task_util = 1;
+__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
+    (10 * NSEC_PER_MSEC);
 #endif
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -4274,7 +4276,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        schedtune_enqueue_task(p, cpu_of(rq));
 
 #endif /* CONFIG_SMP */
-
        hrtick_update(rq);
 }
 
@@ -5648,6 +5649,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
                if (new_util > capacity_orig_of(i))
                        continue;
 
+#ifdef CONFIG_SCHED_WALT
+               if (walt_cpu_high_irqload(i))
+                       continue;
+#endif
                /*
                 * For boosted tasks we favor idle cpus unconditionally to
                 * improve latency.
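
For reference, the walt_cpu_high_irqload() predicate used in the hunk above is not part of this diff; it lives on the WALT side (kernel/sched/walt.c in WALT-enabled trees). Below is a minimal sketch of what such a check could look like, assuming the run queue tracks accumulated irq time in rq->avg_irqload and the jiffy of its last update in rq->irqload_ts (both assumptions, neither field appears in this diff):

	/*
	 * Hypothetical sketch only; the real helper may differ. It treats a
	 * CPU as "high irqload" when its recently observed irq time meets or
	 * exceeds the new sysctl_sched_walt_cpu_high_irqload threshold
	 * (10 ms by default), so find_best_target() will skip that CPU.
	 */
	#include <linux/jiffies.h>
	#include "sched.h"

	/* Assumed staleness window, in jiffies, after which irq load is ignored. */
	#define WALT_HIGH_IRQ_TIMEOUT	3

	extern unsigned int sysctl_sched_walt_cpu_high_irqload;

	static inline u64 walt_irqload(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		s64 delta = get_jiffies_64() - rq->irqload_ts;

		/* Only count irq load seen within the last few jiffies. */
		if (delta < WALT_HIGH_IRQ_TIMEOUT)
			return rq->avg_irqload;
		return 0;
	}

	static inline int walt_cpu_high_irqload(int cpu)
	{
		return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
	}

The effect in find_best_target() is that candidate CPUs currently saturated with interrupt work are rejected outright, in the same way CPUs whose projected utilization exceeds capacity_orig_of() already are.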