Revert "hmp: dont attempt to pull tasks if affinity doesn't allow it"
author Jon Medhurst <tixy@linaro.org>
Tue, 8 Apr 2014 15:43:19 +0000 (16:43 +0100)
committer Jon Medhurst <tixy@linaro.org>
Tue, 8 Apr 2014 15:43:19 +0000 (16:43 +0100)
This reverts commit 5a570cfc01b06906faa8ac67ad7c0c6f278761c4.
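
Restoring the previous behaviour: hmp_get_heaviest_task() again takes a
migrate_up flag rather than a target CPU, and hmp_idle_pull() no longer
checks whether the heaviest task's affinity allows it to run on this_cpu.
A minimal sketch of the call sites as they stand after this revert (taken
from the hunks below; surrounding HMP scheduler context assumed):

    /* Both hmp_force_up_migration() and hmp_idle_pull() ask for the
     * heaviest eligible task with the migrate_up flag set, rather than
     * naming a specific destination CPU. */
    curr = hmp_get_heaviest_task(curr, 1);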

Signed-off-by: Jon Medhurst <tixy@linaro.org>
kernel/sched/fair.c

index 1957f2589d9a0728515fd8313f32fa8d4901481f..128d5723ae4d301c593ddb7c29e81fe89f423726 100644
@@ -3694,31 +3694,30 @@ static inline struct hmp_domain *hmp_faster_domain(int cpu);
 
 /* must hold runqueue lock for queue se is currently on */
 static struct sched_entity *hmp_get_heaviest_task(
-                               struct sched_entity *se, int target_cpu)
+                               struct sched_entity *se, int migrate_up)
 {
        int num_tasks = hmp_max_tasks;
        struct sched_entity *max_se = se;
        unsigned long int max_ratio = se->avg.load_avg_ratio;
        const struct cpumask *hmp_target_mask = NULL;
-       struct hmp_domain *hmp;
 
-       if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
-               return max_se;
+       if (migrate_up) {
+               struct hmp_domain *hmp;
+               if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
+                       return max_se;
 
-       hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
-       hmp_target_mask = &hmp->cpus;
-       if (target_cpu >= 0) {
-               BUG_ON(!cpumask_test_cpu(target_cpu, hmp_target_mask));
-               hmp_target_mask = cpumask_of(target_cpu);
+               hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
+               hmp_target_mask = &hmp->cpus;
        }
        /* The currently running task is not on the runqueue */
        se = __pick_first_entity(cfs_rq_of(se));
 
        while (num_tasks && se) {
                if (entity_is_task(se) &&
-                       se->avg.load_avg_ratio > max_ratio &&
-                       cpumask_intersects(hmp_target_mask,
-                               tsk_cpus_allowed(task_of(se)))) {
+                       (se->avg.load_avg_ratio > max_ratio &&
+                        hmp_target_mask &&
+                        cpumask_intersects(hmp_target_mask,
+                               tsk_cpus_allowed(task_of(se))))) {
                        max_se = se;
                        max_ratio = se->avg.load_avg_ratio;
                }
@@ -7127,7 +7126,7 @@ static void hmp_force_up_migration(int this_cpu)
                        }
                }
                orig = curr;
-               curr = hmp_get_heaviest_task(curr, -1);
+               curr = hmp_get_heaviest_task(curr, 1);
                p = task_of(curr);
                if (hmp_up_migration(cpu, &target_cpu, curr)) {
                        cpu_rq(target_cpu)->wake_for_idle_pull = 1;
@@ -7224,14 +7223,12 @@ static unsigned int hmp_idle_pull(int this_cpu)
                        }
                }
                orig = curr;
-               curr = hmp_get_heaviest_task(curr, this_cpu);
+               curr = hmp_get_heaviest_task(curr, 1);
                /* check if heaviest eligible task on this
                 * CPU is heavier than previous task
                 */
                if (hmp_task_eligible_for_up_migration(curr) &&
-                       curr->avg.load_avg_ratio > ratio &&
-                       cpumask_test_cpu(this_cpu,
-                                       tsk_cpus_allowed(task_of(curr)))) {
+                       curr->avg.load_avg_ratio > ratio) {
                        p = task_of(curr);
                        target = rq;
                        ratio = curr->avg.load_avg_ratio;