sched: SCHED_HMP multi-domain task migration control
author Morten Rasmussen <Morten.Rasmussen@arm.com>
Fri, 14 Sep 2012 13:38:17 +0000 (14:38 +0100)
committer Jon Medhurst <tixy@linaro.org>
Wed, 17 Jul 2013 10:12:25 +0000 (11:12 +0100)
We need a way to prevent tasks that are migrating up and down the
hmp_domains from migrating straight on through before their tracked
load has adapted to the compute capacity of the CPU in the new
hmp_domain. This patch adds a next up/down migration delay that
prevents a task from doing another migration in the same direction
until the delay has expired.

Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
include/linux/sched.h
kernel/sched/core.c
kernel/sched/fair.c
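
Before the diff itself, a minimal standalone sketch (not kernel code) of
the rate-limiting pattern the patch introduces: record a timestamp when a
task migrates in one direction, then refuse further migrations in that
direction until a settle delay has passed. The struct, function names and
clock values below are illustrative only; the kernel code uses
cfs_rq_clock_task() and the per-entity sched_avg fields added here.

#include <stdio.h>
#include <stdint.h>

#define NEXT_UP_THRESHOLD 4096          /* ~1us units; ~4 ms, as in the patch */

struct demo_task {
        uint64_t last_up_migration;     /* ns timestamp of last up migration */
};

/* Mirror of the patch's check: shifting the ns delta right by 10
 * (~dividing by 1024) yields ~microsecond units for comparison. */
static int up_migration_allowed(const struct demo_task *t, uint64_t now_ns)
{
        return ((now_ns - t->last_up_migration) >> 10) >= NEXT_UP_THRESHOLD;
}

int main(void)
{
        struct demo_task t = { .last_up_migration = 0 };
        uint64_t now = 10000000ULL;     /* pretend "now" is 10 ms, in ns */

        /* Zeroed timestamp (as in __sched_fork()): the delta is huge,
         * so a fresh task may migrate straight away. */
        printf("fresh task: %d\n", up_migration_allowed(&t, now));

        t.last_up_migration = now;      /* task up-migrates now */
        printf("+1 ms: %d\n", up_migration_allowed(&t, now + 1000000));
        printf("+5 ms: %d\n", up_migration_allowed(&t, now + 5000000));
        return 0;
}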

index cfb9a2efc213d683599d38be27d94df3833fa6f2..5e903596e489a2906a694be29a50305ce1430593 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -938,6 +938,10 @@ struct sched_avg {
        s64 decay_count;
        unsigned long load_avg_contrib;
        unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+       u64 hmp_last_up_migration;
+       u64 hmp_last_down_migration;
+#endif
        u32 usage_avg_sum;
 };
 
index e8b335016c526594cd910a030c962099907d8518..e45295dc33adb7ed82c893d08b0b6cba4c3f6cd4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1617,6 +1617,10 @@ static void __sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
        p->se.avg.runnable_avg_period = 0;
        p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+       p->se.avg.hmp_last_up_migration = 0;
+       p->se.avg.hmp_last_down_migration = 0;
+#endif
 #endif
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
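
A note on the reset above (my reading, not from the commit message):
task_struct, including se.avg, is copied from the parent on fork, so
without this reset a child would inherit the parent's migration
timestamps and could be throttled from birth. Zeroing them makes the
first delta effectively the full clock value:

/* With hmp_last_up_migration == 0, the settle check reduces to
 *      (now - 0) >> 10 < hmp_next_up_threshold
 * which is false almost immediately after boot, so freshly forked
 * tasks are free to migrate in either direction. */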
index ecbeb90adcff50c507263882b75c63d322e880ad..fe2408f25e24a6a68800258ffb690c5603961d32 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3401,12 +3401,16 @@ static int __init hmp_cpu_mask_setup(void)
  * tweaking suit particular needs.
  *
  * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
+ * hmp_next_up_threshold: Delay before next up migration (1024 ~= 1 ms)
+ * hmp_next_down_threshold: Delay before next down migration (1024 ~= 1 ms)
  */
 unsigned int hmp_up_threshold = 512;
 unsigned int hmp_down_threshold = 256;
 #ifdef CONFIG_SCHED_HMP_PRIO_FILTER
 unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
 #endif
+unsigned int hmp_next_up_threshold = 4096;
+unsigned int hmp_next_down_threshold = 4096;
 
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
@@ -3469,6 +3473,21 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
                                tsk_cpus_allowed(tsk));
 }
 
+static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
+{
+       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+       se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
+       se->avg.hmp_last_down_migration = 0;
+}
+
+static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
+{
+       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+       se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
+       se->avg.hmp_last_up_migration = 0;
+}
 #endif /* CONFIG_SCHED_HMP */
 
 /*
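
Note the asymmetric reset in these two helpers: each stamps the cfs_rq
task clock for its own direction and zeroes the timestamp for the
opposite direction. Only repeated migrations in the same direction are
therefore throttled; a task that has just moved up can still be moved
back down immediately if its load calls for it. A sketch of the effect
(illustrative comment, not kernel code):

/* up-migration of p at time T:
 *      p->se.avg.hmp_last_up_migration   = T;  -> further up moves throttled
 *      p->se.avg.hmp_last_down_migration = 0;  -> down moves judged on
 *                                                 load alone
 * down-migration at time T: the mirror image of the above.
 */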
@@ -3572,11 +3591,13 @@ unlock:
 #ifdef CONFIG_SCHED_HMP
        if (hmp_up_migration(prev_cpu, &p->se)) {
                new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+               hmp_next_up_delay(&p->se, new_cpu);
                trace_sched_hmp_migrate(p, new_cpu, 0);
                return new_cpu;
        }
        if (hmp_down_migration(prev_cpu, &p->se)) {
                new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+               hmp_next_down_delay(&p->se, new_cpu);
                trace_sched_hmp_migrate(p, new_cpu, 0);
                return new_cpu;
        }
@@ -5859,6 +5880,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 {
        struct task_struct *p = task_of(se);
+       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+       u64 now;
 
        if (hmp_cpu_is_fastest(cpu))
                return 0;
@@ -5869,6 +5892,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
                return 0;
 #endif
 
+       /* Let the task load settle before doing another up migration */
+       now = cfs_rq_clock_task(cfs_rq);
+       if (((now - se->avg.hmp_last_up_migration) >> 10)
+                                       < hmp_next_up_threshold)
+               return 0;
+
        if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
                                        tsk_cpus_allowed(p))
                && se->avg.load_avg_ratio > hmp_up_threshold) {
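
The arithmetic behind the settle check, for reference: cfs_rq_clock_task()
counts nanoseconds, and the right shift by 10 approximates division by
1000, giving ~1.024 us units. The resulting delays, using the values from
this patch:

/* threshold 1024 -> 1024 * 1024 ns ~= 1.05 ms ("1024 ~= 1 ms" above)
 * threshold 4096 -> 4096 * 1024 ns ~= 4.2 ms, the default settle delay
 * The same conversion applies to the down-migration check below. */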
@@ -5881,6 +5910,8 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 {
        struct task_struct *p = task_of(se);
+       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+       u64 now;
 
        if (hmp_cpu_is_slowest(cpu))
                return 0;
@@ -5891,6 +5922,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
                return 1;
 #endif
 
+       /* Let the task load settle before doing another down migration */
+       now = cfs_rq_clock_task(cfs_rq);
+       if (((now - se->avg.hmp_last_down_migration) >> 10)
+                                       < hmp_next_down_threshold)
+               return 0;
+
        if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
                                        tsk_cpus_allowed(p))
                && se->avg.load_avg_ratio < hmp_down_threshold) {
@@ -6081,6 +6118,7 @@ static void hmp_force_up_migration(int this_cpu)
                                target->migrate_task = p;
                                force = 1;
                                trace_sched_hmp_migrate(p, target->push_cpu, 1);
+                               hmp_next_up_delay(&p->se, target->push_cpu);
                        }
                }
                raw_spin_unlock_irqrestore(&target->lock, flags);