Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
author Mark Brown <broonie@linaro.org>
Thu, 14 Aug 2014 13:22:49 +0000 (14:22 +0100)
committer Mark Brown <broonie@linaro.org>
Thu, 14 Aug 2014 13:22:49 +0000 (14:22 +0100)
include/linux/sched.h
kernel/sched/core.c
kernel/sched/fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a3c8b270931be92971f9b81b943d066dfdc0481d..a4c8bf7f47039fc143dfaf5bdb0073536592eba1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -950,6 +950,14 @@ struct sched_avg {
        u32 usage_avg_sum;
 };
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * We want to avoid boosting any processes forked from init (PID 1)
+ * and kthreadd (assumed to be PID 2).
+ */
+#define hmp_task_should_forkboost(task) ((task->parent && task->parent->pid > 2))
+#endif
+
 #ifdef CONFIG_SCHEDSTATS
 struct sched_statistics {
        u64                     wait_start;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c481b498f49a941515ef9d96edd8760cbed93721..9408d236c78fee5f00a0e1eb70e3f88ed5addc55 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1635,9 +1635,9 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_SCHED_HMP
        /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
 #define LOAD_AVG_MAX 47742
-       if (p->mm) {
-               p->se.avg.hmp_last_up_migration = 0;
-               p->se.avg.hmp_last_down_migration = 0;
+       p->se.avg.hmp_last_up_migration = 0;
+       p->se.avg.hmp_last_down_migration = 0;
+       if (hmp_task_should_forkboost(p)) {
                p->se.avg.load_avg_ratio = 1023;
                p->se.avg.load_avg_contrib =
                                (1023 * scale_load_down(p->se.load.weight));
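
In __sched_fork(), the HMP migration timestamps are now cleared for every new task, while the full-load fork boost (load_avg_ratio = 1023 plus a matching load_avg_contrib) is applied only when hmp_task_should_forkboost(p) is true, replacing the old p->mm test that boosted any task with an address space. A rough sketch of the resulting initialisation, using a stand-in structure for the struct sched_avg fields and an assumed nice-0 weight of 1024 for scale_load_down(p->se.load.weight):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the struct sched_avg fields touched by the hunk above. */
    struct hmp_avg {
            uint64_t hmp_last_up_migration;
            uint64_t hmp_last_down_migration;
            uint32_t load_avg_ratio;
            uint64_t load_avg_contrib;
    };

    /* Mirrors the fork-time behaviour: timestamps are always reset, the
     * boost is applied only to eligible tasks.  'weight' models
     * scale_load_down(p->se.load.weight), assumed to be 1024 at nice 0. */
    static void hmp_fork_init(struct hmp_avg *avg, int should_forkboost,
                              unsigned long weight)
    {
            avg->hmp_last_up_migration = 0;
            avg->hmp_last_down_migration = 0;

            if (should_forkboost) {
                    /* Pretend the task has been fully busy so it starts
                     * life on (and tends to stay on) a big CPU. */
                    avg->load_avg_ratio = 1023;
                    avg->load_avg_contrib = 1023 * weight;
            }
    }

    int main(void)
    {
            struct hmp_avg a = { 0 };

            hmp_fork_init(&a, 1, 1024);
            printf("ratio=%u contrib=%llu\n", (unsigned)a.load_avg_ratio,
                   (unsigned long long)a.load_avg_contrib);
            return 0;
    }
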
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 97ed132c809a9c203567a7e28833757beab65aef..41d0cbda605d1f46ff86908a482537f0d16ccec1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4385,7 +4385,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 
 #ifdef CONFIG_SCHED_HMP
        /* always put non-kernel forking tasks on a big domain */
-       if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
+       if (unlikely(sd_flag & SD_BALANCE_FORK) && hmp_task_should_forkboost(p)) {
                new_cpu = hmp_select_faster_cpu(p, prev_cpu);
                if (new_cpu != NR_CPUS) {
                        hmp_next_up_delay(&p->se, new_cpu);
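
select_task_rq_fair() applies the same gate to fork-time placement: the SD_BALANCE_FORK path asks hmp_select_faster_cpu() for a big-cluster CPU only when the new task is boost-eligible, rather than whenever p->mm is set. A simplified model of that branch, where pick_fork_cpu(), pick_big_cpu() and the SD_BALANCE_FORK value are illustrative stand-ins, not kernel code:

    #include <stdio.h>

    #define SD_BALANCE_FORK 0x0008          /* illustrative flag value only */

    /* Model of the fork-placement branch: push only boost-eligible tasks
     * towards a big-cluster CPU; otherwise keep the previous CPU choice.
     * nr_cpus plays the role of NR_CPUS as the "no CPU found" sentinel.
     * (The real code also arms the up-migration delay via
     *  hmp_next_up_delay() before returning the new CPU.) */
    static int pick_fork_cpu(int sd_flag, int prev_cpu, int nr_cpus,
                             int forkboost, int (*select_faster_cpu)(int))
    {
            if ((sd_flag & SD_BALANCE_FORK) && forkboost) {
                    int new_cpu = select_faster_cpu(prev_cpu);
                    if (new_cpu != nr_cpus)
                            return new_cpu;
            }
            return prev_cpu;
    }

    static int pick_big_cpu(int prev_cpu)
    {
            (void)prev_cpu;
            return 2;                       /* pretend CPU2 is on the big cluster */
    }

    int main(void)
    {
            printf("forked shell  -> CPU%d\n",
                   pick_fork_cpu(SD_BALANCE_FORK, 0, 4, 1, pick_big_cpu));
            printf("kthread child -> CPU%d\n",
                   pick_fork_cpu(SD_BALANCE_FORK, 0, 4, 0, pick_big_cpu));
            return 0;
    }
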
@@ -6537,16 +6537,16 @@ static int nohz_test_cpu(int cpu)
  * Decide if the tasks on the busy CPUs in the
  * littlest domain would benefit from an idle balance
  */
-static int hmp_packing_ilb_needed(int cpu)
+static int hmp_packing_ilb_needed(int cpu, int ilb_needed)
 {
        struct hmp_domain *hmp;
-       /* always allow ilb on non-slowest domain */
+       /* allow previous decision on non-slowest domain */
        if (!hmp_cpu_is_slowest(cpu))
-               return 1;
+               return ilb_needed;
 
        /* if disabled, use normal ILB behaviour */
        if (!hmp_packing_enabled)
-               return 1;
+               return ilb_needed;
 
        hmp = hmp_cpu_domain(cpu);
        for_each_cpu_and(cpu, &hmp->cpus, nohz.idle_cpus_mask) {
@@ -6558,19 +6558,34 @@ static int hmp_packing_ilb_needed(int cpu)
 }
 #endif
 
+DEFINE_PER_CPU(cpumask_var_t, ilb_tmpmask);
+
 static inline int find_new_ilb(int call_cpu)
 {
        int ilb = cpumask_first(nohz.idle_cpus_mask);
 #ifdef CONFIG_SCHED_HMP
-       int ilb_needed = 1;
+       int ilb_needed = 0;
+       int cpu;
+       struct cpumask* tmp = per_cpu(ilb_tmpmask, smp_processor_id());
 
        /* restrict nohz balancing to occur in the same hmp domain */
        ilb = cpumask_first_and(nohz.idle_cpus_mask,
                        &((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus);
 
+       /* check to see if it's necessary within this domain */
+       cpumask_andnot(tmp,
+                       &((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus,
+                       nohz.idle_cpus_mask);
+       for_each_cpu(cpu, tmp) {
+               if (cpu_rq(cpu)->nr_running > 1) {
+                       ilb_needed = 1;
+                       break;
+               }
+       }
+
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
        if (ilb < nr_cpu_ids)
-               ilb_needed = hmp_packing_ilb_needed(ilb);
+               ilb_needed = hmp_packing_ilb_needed(ilb, ilb_needed);
 #endif
 
        if (ilb_needed && ilb < nr_cpu_ids && idle_cpu(ilb))
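
Taken together, the remaining fair.c hunks change find_new_ilb() from "always nominate an idle balancer in the caller's HMP domain" to "nominate one only if some busy CPU in that domain has more than one runnable task". A per-CPU scratch mask (ilb_tmpmask) is used to walk the non-idle CPUs of the domain, and hmp_packing_ilb_needed() now merely filters that decision for the slowest domain (or passes it through when little packing is disabled) instead of forcing an idle balance. A compact userspace model of the decision, with plain arrays standing in for cpumasks and run-queues:

    #include <stdio.h>

    #define NR_CPUS 4

    /* idle[cpu]       : stand-in for nohz.idle_cpus_mask membership
     * nr_running[cpu] : stand-in for cpu_rq(cpu)->nr_running
     * in_domain[cpu]  : stand-in for hmp_cpu_domain(call_cpu)->cpus */
    static int find_new_ilb_model(const int idle[NR_CPUS],
                                  const int nr_running[NR_CPUS],
                                  const int in_domain[NR_CPUS])
    {
            int ilb = NR_CPUS;      /* "no balancer" sentinel, like nr_cpu_ids */
            int ilb_needed = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    /* first idle CPU inside the caller's HMP domain */
                    if (ilb == NR_CPUS && in_domain[cpu] && idle[cpu])
                            ilb = cpu;
                    /* is any busy CPU in the domain actually overloaded? */
                    if (in_domain[cpu] && !idle[cpu] && nr_running[cpu] > 1)
                            ilb_needed = 1;
            }

            /* (With CONFIG_SCHED_HMP_LITTLE_PACKING the result would still
             *  be passed through hmp_packing_ilb_needed() here.) */
            return (ilb_needed && ilb < NR_CPUS) ? ilb : NR_CPUS;
    }

    int main(void)
    {
            int idle[NR_CPUS]       = { 0, 1, 1, 0 };
            int nr_running[NR_CPUS] = { 2, 0, 0, 1 };
            int in_domain[NR_CPUS]  = { 1, 1, 0, 0 };   /* little cluster: CPUs 0-1 */

            /* CPU0 is busy with 2 tasks and CPU1 is idle -> CPU1 is nominated. */
            printf("ilb = %d\n", find_new_ilb_model(idle, nr_running, in_domain));
            return 0;
    }
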