HMP: Explicitly implement all-load-is-max-load policy for HMP targets
author Chris Redpath <chris.redpath@arm.com>
Mon, 15 Jul 2013 15:06:44 +0000 (16:06 +0100)
committer Jon Medhurst <tixy@linaro.org>
Thu, 5 Sep 2013 17:09:16 +0000 (18:09 +0100)
Experimentally, one of the best policies for HMP migration CPU
selection is to completely ignore part-loaded CPUs and only look
for idle ones. If there are no idle CPUs, we choose the one that
was least recently disturbed, i.e. the CPU whose last up- or
down-migration timestamp is oldest.
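
As a rough illustration of the policy in isolation, here is a
minimal user-space sketch: treat any non-zero load as a fully busy
runqueue (1023), pick the minimum, and break ties on the oldest
migration timestamp. The cpu_load[] and last_migration[] arrays are
hypothetical stand-ins for the per-CPU sched_avg fields
(load_avg_ratio and the hmp_last_{up,down}_migration timestamps);
this models the selection loop, it is not the kernel code itself.

#include <stdio.h>
#include <limits.h>

#define NCPUS 4

/* Hypothetical per-CPU state standing in for sched_avg. */
static unsigned long cpu_load[NCPUS] = { 0, 512, 0, 900 };
static unsigned long long last_migration[NCPUS] = { 40, 10, 25, 5 };

static int pick_target(void)
{
	unsigned long min_load = ULONG_MAX;
	unsigned long long min_stamp = ULLONG_MAX;
	int best = -1;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		/* Any load at all counts as a fully busy runqueue. */
		unsigned long load = cpu_load[cpu] ? 1023 : 0;

		/*
		 * Prefer lower load; on a tie take the CPU whose last
		 * migration is oldest, i.e. least recently disturbed.
		 */
		if (load < min_load ||
		    (load == min_load && last_migration[cpu] < min_stamp)) {
			min_load = load;
			min_stamp = last_migration[cpu];
			best = cpu;
		}
	}
	return best;
}

int main(void)
{
	/* cpu2 wins: idle, and disturbed less recently than cpu0. */
	printf("selected cpu%d\n", pick_target());
	return 0;
}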

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <liviu.dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
kernel/sched/fair.c

index c3ba73750d62fff9916e095595c69c931f7e05e1..78f6d028d29405cfedd9a9f42a64d9fbedcdd9bd 100644
@@ -3959,8 +3959,7 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
        u64 min_target_last_migration = ULLONG_MAX;
        u64 curr_last_migration;
        unsigned long min_runnable_load = INT_MAX;
-       unsigned long scaled_min_runnable_load = INT_MAX;
-       unsigned long contrib, scaled_contrib;
+       unsigned long contrib;
        struct sched_avg *avg;
 
        for_each_cpu_mask(cpu, hmpd->cpus) {
@@ -3969,12 +3968,17 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
                curr_last_migration = avg->hmp_last_up_migration ?
                        avg->hmp_last_up_migration : avg->hmp_last_down_migration;
 
-               /* don't use the divisor in the loop, just at the end */
-               contrib = avg->load_avg_ratio * scale_load_down(1024);
-               scaled_contrib = contrib >> 13;
+               contrib = avg->load_avg_ratio;
+               /*
+                * Consider a runqueue completely busy if there is any load
+                * on it. Definitely not the best for overall fairness, but
+                * does well in typical Android use cases.
+                */
+               if (contrib)
+                       contrib = 1023;
 
                if ((contrib < min_runnable_load) ||
-                       (scaled_contrib == scaled_min_runnable_load &&
+                       (contrib == min_runnable_load &&
                         curr_last_migration < min_target_last_migration)) {
                        /*
                         * if the load is the same target the CPU with
@@ -3984,7 +3988,6 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
                         * domain is fully loaded
                         */
                        min_runnable_load = contrib;
-                       scaled_min_runnable_load = scaled_contrib;
                        min_cpu_runnable_temp = cpu;
                        min_target_last_migration = curr_last_migration;
                }
@@ -3993,10 +3996,7 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
        if (min_cpu)
                *min_cpu = min_cpu_runnable_temp;
 
-       /* domain will often have at least one empty CPU */
-       trace_printk("hmp_domain_min_load returning %lu\n",
-               min_runnable_load > 1023 ? 1023 : min_runnable_load);
-       return min_runnable_load > 1023 ? 1023 : min_runnable_load;
+       return min_runnable_load;
 }
 
 /*
@@ -4023,10 +4023,9 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
        if (hmp_cpu_is_slowest(cpu))
                return NR_CPUS;
 
-       /* Is the current domain fully loaded? */
-       /* load < ~50% */
+       /* Is there an idle CPU in the current domain? */
        min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
-       if (min_usage < (NICE_0_LOAD>>1))
+       if (min_usage == 0)
                return NR_CPUS;
 
        /* Is the task alone on the cpu? */
@@ -4038,10 +4037,9 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
        if (hmp_task_starvation(se) > 768)
                return NR_CPUS;
 
-       /* Does the slower domain have spare cycles? */
+       /* Does the slower domain have any idle CPUs? */
        min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
-       /* load > 50% */
-       if (min_usage > NICE_0_LOAD/2)
+       if (min_usage > 0)
                return NR_CPUS;
 
        if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
@@ -6501,9 +6499,11 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
                                        < hmp_next_up_threshold)
                return 0;
 
-       /* Target domain load < 94% */
-       if (hmp_domain_min_load(hmp_faster_domain(cpu), target_cpu)
-                       > NICE_0_LOAD-64)
+       /*
+        * hmp_domain_min_load() now only returns 0 (idle CPU found)
+        * or 1023 (all CPUs partly busy); require an idle CPU here.
+        */
+       if (hmp_domain_min_load(hmp_faster_domain(cpu), target_cpu) != 0)
                return 0;
 
        if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
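
With the scaling and clamping gone, hmp_domain_min_load() can only
return 0 (the domain has an idle CPU) or 1023 (every CPU carries some
load), so all three call sites reduce to a test for an idle CPU. A
minimal sketch of that caller-side contract follows; domain_min_load()
is a hypothetical stand-in for the kernel helper, and the printf calls
stand in for the NR_CPUS / "return 0" bail-outs:

#include <stdio.h>

/*
 * Hypothetical stand-in for hmp_domain_min_load(): 0 if any CPU in
 * the domain is idle, 1023 once every CPU carries some load.
 */
static unsigned int domain_min_load(const unsigned long *loads, int n)
{
	for (int i = 0; i < n; i++)
		if (loads[i] == 0)
			return 0;	/* idle CPU found */
	return 1023;			/* all-load-is-max-load */
}

int main(void)
{
	unsigned long faster[] = { 300, 0 };	/* one idle big CPU  */
	unsigned long slower[] = { 120, 64 };	/* both LITTLEs busy */

	/* hmp_up_migration(): proceed only if the faster domain is idle. */
	printf("up migration allowed: %d\n",
	       domain_min_load(faster, 2) == 0);

	/* hmp_offload_down(): bail out when the slower domain is not idle. */
	printf("offload down allowed: %d\n",
	       domain_min_load(slower, 2) == 0);
	return 0;
}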