Merge tag 'v3.10.52' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] kernel/sched/core.c
index c771f2547bef323328759caea593bf8883a99484..f44c01b9d713a1416552e8fb372a5331ebd857d3 100644
@@ -1407,7 +1407,11 @@ void scheduler_ipi(void)
 {
        if (llist_empty(&this_rq()->wake_list)
                        && !tick_nohz_full_cpu(smp_processor_id())
-                       && !got_nohz_idle_kick())
+                       && !got_nohz_idle_kick()
+#ifdef CONFIG_SCHED_HMP
+                       && !this_rq()->wake_for_idle_pull
+#endif
+                       )
                return;
 
        /*
@@ -1434,6 +1438,11 @@ void scheduler_ipi(void)
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
+#ifdef CONFIG_SCHED_HMP
+       else if (unlikely(this_rq()->wake_for_idle_pull))
+               raise_softirq_irqoff(SCHED_SOFTIRQ);
+#endif
+
        irq_exit();
 }
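
These two hunks hook the HMP "idle pull" machinery into the rescheduling
IPI: a raised wake_for_idle_pull flag keeps scheduler_ipi() from taking
the early-return fast path, and when the flag is the only reason the IPI
arrived, SCHED_SOFTIRQ is raised so the fair-class balance softirq can
hand a task over to the idle fast CPU. The sender side lives in fair.c;
a minimal sketch of its shape (the helper name here is hypothetical, only
the flag-then-IPI sequence follows the LSK HMP patches):

/* Hypothetical sender-side helper: mark the busy CPU's runqueue, then
 * kick it. The IPI lands in scheduler_ipi() above, sees
 * wake_for_idle_pull set, and raises SCHED_SOFTIRQ so the balance
 * softirq can push a task toward the idle big CPU. */
static void hmp_kick_for_idle_pull(int busy_cpu)
{
	cpu_rq(busy_cpu)->wake_for_idle_pull = 1;
	smp_send_reschedule(busy_cpu);
}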
 
@@ -1623,6 +1632,20 @@ static void __sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
        p->se.avg.runnable_avg_period = 0;
        p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+       /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
+#define LOAD_AVG_MAX 47742
+       if (p->mm) {
+               p->se.avg.hmp_last_up_migration = 0;
+               p->se.avg.hmp_last_down_migration = 0;
+               p->se.avg.load_avg_ratio = 1023;
+               p->se.avg.load_avg_contrib =
+                               (1023 * scale_load_down(p->se.load.weight));
+               p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
+               p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
+               p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
+       }
+#endif
 #endif
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
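
A freshly forked task that owns an mm is seeded as fully loaded here
(load_avg_ratio = 1023 and both running averages pinned at LOAD_AVG_MAX),
so HMP places it on a fast CPU first and lets the tracked load decay
toward its real behaviour, rather than ramping up from zero on a slow
CPU. The 47742 constant is the fixed point of the per-entity
load-tracking series in fair.c, where each 1024us period decays the
history by y with y^32 = 0.5. The standalone program below is an
illustration that reproduces the constant using fair.c's 32-bit
fixed-point factor for y:

#include <stdio.h>

/* y = 2^(-1/32) in 0.32 fixed point, i.e. runnable_avg_yN_inv[1] from
 * fair.c: 32 consecutive periods halve a contribution. */
#define Y_INV 0xfa83b2daULL

static unsigned long decay_one_period(unsigned long x)
{
	return (x * Y_INV) >> 32;	/* x * y, truncating as the kernel does */
}

int main(void)
{
	unsigned long sum = 0, prev;
	int n = 0;

	/* add one fully runnable 1024us period per step until the
	 * truncated series stops growing */
	do {
		prev = sum;
		sum = decay_one_period(sum) + 1024;
		n++;
	} while (sum != prev);

	/* prints 47742, which fair.c documents as being reached after
	 * LOAD_AVG_MAX_N = 345 full periods */
	printf("LOAD_AVG_MAX = %lu (converged after %d steps)\n", sum, n);
	return 0;
}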
@@ -3825,6 +3848,8 @@ static struct task_struct *find_process_by_pid(pid_t pid)
        return pid ? find_task_by_vpid(pid) : current;
 }
 
+extern struct cpumask hmp_slow_cpu_mask;
+
 /* Actually do priority change: must hold rq lock. */
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
@@ -3834,8 +3859,17 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
        p->normal_prio = normal_prio(p);
        /* we are holding p->pi_lock already */
        p->prio = rt_mutex_getprio(p);
-       if (rt_prio(p->prio))
+       if (rt_prio(p->prio)) {
                p->sched_class = &rt_sched_class;
+#ifdef CONFIG_SCHED_HMP
+               if (!cpumask_empty(&hmp_slow_cpu_mask))
+                       if (cpumask_equal(&p->cpus_allowed, cpu_all_mask)) {
+                               p->nr_cpus_allowed =
+                                       cpumask_weight(&hmp_slow_cpu_mask);
+                               do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
+                       }
+#endif
+       }
        else
                p->sched_class = &fair_sched_class;
        set_load_weight(p);
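
With CONFIG_SCHED_HMP, a task that becomes realtime while still carrying
the default "runs anywhere" affinity is confined to hmp_slow_cpu_mask
(the LITTLE cluster), keeping RT work off the big cores by default;
tasks whose affinity was set explicitly are left untouched. A userspace
sketch of how the effect could be observed (illustrative only: it needs
CAP_SYS_NICE and a kernel carrying this patch):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	cpu_set_t mask;
	int cpu;

	/* switching to SCHED_FIFO goes through __setscheduler() above */
	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}

	/* with the patch applied, a default-affinity task should now
	 * report only the slow (LITTLE) CPUs as allowed */
	if (sched_getaffinity(0, sizeof(mask), &mask)) {
		perror("sched_getaffinity");
		return 1;
	}
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &mask))
			printf("allowed: cpu%d\n", cpu);
	return 0;
}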