add rk29 sdio mmc
[firefly-linux-kernel-4.4.55.git] / kernel / sched_fair.c
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22
23 #include <linux/latencytop.h>
24
25 /*
26  * Targeted preemption latency for CPU-bound tasks:
27  * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
28  *
29  * NOTE: this latency value is not the same as the concept of
30  * 'timeslice length' - timeslices in CFS are of variable length
31  * and have no persistent notion like in traditional, time-slice
32  * based scheduling concepts.
33  *
34  * (to see the precise effective timeslice length of your workload,
35  *  run vmstat and monitor the context-switches (cs) field)
36  */
37 unsigned int sysctl_sched_latency = 5000000ULL;
38 unsigned int normalized_sysctl_sched_latency = 5000000ULL;
39
40 /*
41  * Minimal preemption granularity for CPU-bound tasks:
42  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
43  */
44 unsigned int sysctl_sched_min_granularity = 1000000ULL;
45 unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
46
47 /*
48  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
49  */
50 static unsigned int sched_nr_latency = 5;
51
52 /*
53  * After fork, child runs first. If set to 0 (default) then
54  * parent will (try to) run first.
55  */
56 unsigned int sysctl_sched_child_runs_first __read_mostly;
57
58 /*
59  * sys_sched_yield() compat mode
60  *
61  * This option switches the aggressive yield implementation of the
62  * old scheduler back on.
63  */
64 unsigned int __read_mostly sysctl_sched_compat_yield;
65
66 /*
67  * SCHED_OTHER wake-up granularity.
68  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
69  *
70  * This option delays the preemption effects of decoupled workloads
71  * and reduces their over-scheduling. Synchronous workloads will still
72  * have immediate wakeup/sleep latencies.
73  */
74 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
75 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
76
77 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
78
79 static const struct sched_class fair_sched_class;
80
81 /**************************************************************
82  * CFS operations on generic schedulable entities:
83  */
84
85 #ifdef CONFIG_FAIR_GROUP_SCHED
86
87 /* cpu runqueue to which this cfs_rq is attached */
88 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
89 {
90         return cfs_rq->rq;
91 }
92
93 /* An entity is a task if it doesn't "own" a runqueue */
94 #define entity_is_task(se)      (!se->my_q)
95
96 static inline struct task_struct *task_of(struct sched_entity *se)
97 {
98 #ifdef CONFIG_SCHED_DEBUG
99         WARN_ON_ONCE(!entity_is_task(se));
100 #endif
101         return container_of(se, struct task_struct, se);
102 }
103
104 /* Walk up scheduling entities hierarchy */
105 #define for_each_sched_entity(se) \
106                 for (; se; se = se->parent)
107
108 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
109 {
110         return p->se.cfs_rq;
111 }
112
113 /* runqueue on which this entity is (to be) queued */
114 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
115 {
116         return se->cfs_rq;
117 }
118
119 /* runqueue "owned" by this group */
120 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
121 {
122         return grp->my_q;
123 }
124
125 /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
126  * another cpu ('this_cpu')
127  */
128 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
129 {
130         return cfs_rq->tg->cfs_rq[this_cpu];
131 }
132
133 /* Iterate through all leaf cfs_rq's on a runqueue */
134 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
135         list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
136
137 /* Do the two (enqueued) entities belong to the same group? */
138 static inline int
139 is_same_group(struct sched_entity *se, struct sched_entity *pse)
140 {
141         if (se->cfs_rq == pse->cfs_rq)
142                 return 1;
143
144         return 0;
145 }
146
147 static inline struct sched_entity *parent_entity(struct sched_entity *se)
148 {
149         return se->parent;
150 }
151
152 /* return depth at which a sched entity is present in the hierarchy */
153 static inline int depth_se(struct sched_entity *se)
154 {
155         int depth = 0;
156
157         for_each_sched_entity(se)
158                 depth++;
159
160         return depth;
161 }
162
163 static void
164 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
165 {
166         int se_depth, pse_depth;
167
168         /*
169          * The preemption test can only be made between sibling entities that
170          * are in the same cfs_rq, i.e. that have a common parent. Walk up the
171          * hierarchy of both tasks until we find ancestors that are siblings
172          * of a common parent.
173          */
174
175         /* First walk up until both entities are at same depth */
176         se_depth = depth_se(*se);
177         pse_depth = depth_se(*pse);
178
179         while (se_depth > pse_depth) {
180                 se_depth--;
181                 *se = parent_entity(*se);
182         }
183
184         while (pse_depth > se_depth) {
185                 pse_depth--;
186                 *pse = parent_entity(*pse);
187         }
188
189         while (!is_same_group(*se, *pse)) {
190                 *se = parent_entity(*se);
191                 *pse = parent_entity(*pse);
192         }
193 }
194
195 #else   /* !CONFIG_FAIR_GROUP_SCHED */
196
197 static inline struct task_struct *task_of(struct sched_entity *se)
198 {
199         return container_of(se, struct task_struct, se);
200 }
201
202 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
203 {
204         return container_of(cfs_rq, struct rq, cfs);
205 }
206
207 #define entity_is_task(se)      1
208
209 #define for_each_sched_entity(se) \
210                 for (; se; se = NULL)
211
212 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
213 {
214         return &task_rq(p)->cfs;
215 }
216
217 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
218 {
219         struct task_struct *p = task_of(se);
220         struct rq *rq = task_rq(p);
221
222         return &rq->cfs;
223 }
224
225 /* runqueue "owned" by this group */
226 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
227 {
228         return NULL;
229 }
230
231 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
232 {
233         return &cpu_rq(this_cpu)->cfs;
234 }
235
236 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
237                 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
238
239 static inline int
240 is_same_group(struct sched_entity *se, struct sched_entity *pse)
241 {
242         return 1;
243 }
244
245 static inline struct sched_entity *parent_entity(struct sched_entity *se)
246 {
247         return NULL;
248 }
249
250 static inline void
251 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
252 {
253 }
254
255 #endif  /* CONFIG_FAIR_GROUP_SCHED */
256
257
258 /**************************************************************
259  * Scheduling class tree data structure manipulation methods:
260  */
261
262 static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
263 {
264         s64 delta = (s64)(vruntime - min_vruntime);
265         if (delta > 0)
266                 min_vruntime = vruntime;
267
268         return min_vruntime;
269 }
270
271 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
272 {
273         s64 delta = (s64)(vruntime - min_vruntime);
274         if (delta < 0)
275                 min_vruntime = vruntime;
276
277         return min_vruntime;
278 }
279
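/*
 * Why the comparison goes through a signed delta rather than comparing the
 * raw u64 values: vruntime may eventually wrap. With (s64)(vruntime -
 * min_vruntime), a wrapped vruntime that sits just above 0 while
 * min_vruntime sits just below U64_MAX still produces a small positive
 * delta, so it is correctly treated as the later of the two.
 */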
280 static inline int entity_before(struct sched_entity *a,
281                                 struct sched_entity *b)
282 {
283         return (s64)(a->vruntime - b->vruntime) < 0;
284 }
285
286 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
287 {
288         return se->vruntime - cfs_rq->min_vruntime;
289 }
290
291 static void update_min_vruntime(struct cfs_rq *cfs_rq)
292 {
293         u64 vruntime = cfs_rq->min_vruntime;
294
295         if (cfs_rq->curr)
296                 vruntime = cfs_rq->curr->vruntime;
297
298         if (cfs_rq->rb_leftmost) {
299                 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
300                                                    struct sched_entity,
301                                                    run_node);
302
303                 if (!cfs_rq->curr)
304                         vruntime = se->vruntime;
305                 else
306                         vruntime = min_vruntime(vruntime, se->vruntime);
307         }
308
309         cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
310 }
311
312 /*
313  * Enqueue an entity into the rb-tree:
314  */
315 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
316 {
317         struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
318         struct rb_node *parent = NULL;
319         struct sched_entity *entry;
320         s64 key = entity_key(cfs_rq, se);
321         int leftmost = 1;
322
323         /*
324          * Find the right place in the rbtree:
325          */
326         while (*link) {
327                 parent = *link;
328                 entry = rb_entry(parent, struct sched_entity, run_node);
329                 /*
330                  * We don't care about collisions. Nodes with
331                  * the same key stay together.
332                  */
333                 if (key < entity_key(cfs_rq, entry)) {
334                         link = &parent->rb_left;
335                 } else {
336                         link = &parent->rb_right;
337                         leftmost = 0;
338                 }
339         }
340
341         /*
342          * Maintain a cache of leftmost tree entries (it is frequently
343          * used):
344          */
345         if (leftmost)
346                 cfs_rq->rb_leftmost = &se->run_node;
347
348         rb_link_node(&se->run_node, parent, link);
349         rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
350 }
351
352 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
353 {
354         if (cfs_rq->rb_leftmost == &se->run_node) {
355                 struct rb_node *next_node;
356
357                 next_node = rb_next(&se->run_node);
358                 cfs_rq->rb_leftmost = next_node;
359         }
360
361         rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
362 }
363
364 static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
365 {
366         struct rb_node *left = cfs_rq->rb_leftmost;
367
368         if (!left)
369                 return NULL;
370
371         return rb_entry(left, struct sched_entity, run_node);
372 }
373
374 static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
375 {
376         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
377
378         if (!last)
379                 return NULL;
380
381         return rb_entry(last, struct sched_entity, run_node);
382 }
383
384 /**************************************************************
385  * Scheduling class statistics methods:
386  */
387
388 #ifdef CONFIG_SCHED_DEBUG
389 int sched_nr_latency_handler(struct ctl_table *table, int write,
390                 void __user *buffer, size_t *lenp,
391                 loff_t *ppos)
392 {
393         int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
394
395         if (ret || !write)
396                 return ret;
397
398         sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
399                                         sysctl_sched_min_granularity);
400
401         return 0;
402 }
403 #endif
404
405 /*
406  * delta /= w
407  */
408 static inline unsigned long
409 calc_delta_fair(unsigned long delta, struct sched_entity *se)
410 {
411         if (unlikely(se->load.weight != NICE_0_LOAD))
412                 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
413
414         return delta;
415 }
416
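/*
 * Worked example (a sketch, assuming the usual NICE_0_LOAD == 1024): a
 * nice-0 task keeps delta unchanged, while a task of twice that weight
 * (2048) has delta scaled by 1024/2048, i.e. its vruntime advances at half
 * the wall-clock rate, so it can run twice as long for the same vruntime
 * progress.
 */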
417 /*
418  * The idea is to set a period in which each task runs once.
419  *
420  * When there are too many tasks (more than sched_nr_latency) we have to stretch
421  * this period because otherwise the slices get too small.
422  *
423  * p = (nr <= nl) ? l : l*nr/nl
424  */
425 static u64 __sched_period(unsigned long nr_running)
426 {
427         u64 period = sysctl_sched_latency;
428         unsigned long nr_latency = sched_nr_latency;
429
430         if (unlikely(nr_running > nr_latency)) {
431                 period = sysctl_sched_min_granularity;
432                 period *= nr_running;
433         }
434
435         return period;
436 }
437
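/*
 * Worked example with the untuned defaults above (5ms latency, 1ms minimum
 * granularity, hence sched_nr_latency == 5; the real values get scaled by
 * 1 + ilog(ncpus)): with 3 runnable tasks the period stays at the 5ms
 * latency target, with 8 runnable tasks it is stretched to 8 * 1ms = 8ms so
 * that no slice drops below the minimum granularity.
 */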
438 /*
439  * We calculate the wall-time slice from the period by taking a part
440  * proportional to the weight.
441  *
442  * s = p*P[w/rw]
443  */
444 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
445 {
446         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
447
448         for_each_sched_entity(se) {
449                 struct load_weight *load;
450                 struct load_weight lw;
451
452                 cfs_rq = cfs_rq_of(se);
453                 load = &cfs_rq->load;
454
455                 if (unlikely(!se->on_rq)) {
456                         lw = cfs_rq->load;
457
458                         update_load_add(&lw, se->load.weight);
459                         load = &lw;
460                 }
461                 slice = calc_delta_mine(slice, se->load.weight, load);
462         }
463         return slice;
464 }
465
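/*
 * Worked example (same untuned defaults, a single cfs_rq): two runnable
 * tasks with weights 2048 and 1024 share a 5ms period, so their wall-time
 * slices come out to roughly 5ms * 2048/3072 ~= 3.33ms and
 * 5ms * 1024/3072 ~= 1.67ms (calc_delta_mine() rounds, so the exact
 * nanosecond values differ slightly).
 */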
466 /*
467  * We calculate the vruntime slice of a to-be-inserted task
468  *
469  * vs = s/w
470  */
471 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
472 {
473         return calc_delta_fair(sched_slice(cfs_rq, se), se);
474 }
475
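/*
 * Continuing the example above: converting a slice to virtual time divides
 * by the entity's own weight, so the heavier task's ~3.33ms slice and the
 * lighter task's ~1.67ms slice both advance vruntime by about 1.67ms.
 * Equal vruntime progress per period is exactly the fairness invariant the
 * rbtree ordering relies on.
 */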
476 /*
477  * Update the current task's runtime statistics. Skip current tasks that
478  * are not in our scheduling class.
479  */
480 static inline void
481 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
482               unsigned long delta_exec)
483 {
484         unsigned long delta_exec_weighted;
485
486         schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
487
488         curr->sum_exec_runtime += delta_exec;
489         schedstat_add(cfs_rq, exec_clock, delta_exec);
490         delta_exec_weighted = calc_delta_fair(delta_exec, curr);
491         curr->vruntime += delta_exec_weighted;
492         update_min_vruntime(cfs_rq);
493 }
494
495 static void update_curr(struct cfs_rq *cfs_rq)
496 {
497         struct sched_entity *curr = cfs_rq->curr;
498         u64 now = rq_of(cfs_rq)->clock;
499         unsigned long delta_exec;
500
501         if (unlikely(!curr))
502                 return;
503
504         /*
505          * Get the amount of time the current task was running
506          * since the last time we changed load (this cannot
507          * overflow on 32 bits):
508          */
509         delta_exec = (unsigned long)(now - curr->exec_start);
510         if (!delta_exec)
511                 return;
512
513         __update_curr(cfs_rq, curr, delta_exec);
514         curr->exec_start = now;
515
516         if (entity_is_task(curr)) {
517                 struct task_struct *curtask = task_of(curr);
518
519                 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
520                 cpuacct_charge(curtask, delta_exec);
521                 account_group_exec_runtime(curtask, delta_exec);
522         }
523 }
524
525 static inline void
526 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
527 {
528         schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
529 }
530
531 /*
532  * Task is being enqueued - update stats:
533  */
534 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
535 {
536         /*
537          * Are we enqueueing a waiting task? (for current tasks
538          * a dequeue/enqueue event is a NOP)
539          */
540         if (se != cfs_rq->curr)
541                 update_stats_wait_start(cfs_rq, se);
542 }
543
544 static void
545 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
546 {
547         schedstat_set(se->wait_max, max(se->wait_max,
548                         rq_of(cfs_rq)->clock - se->wait_start));
549         schedstat_set(se->wait_count, se->wait_count + 1);
550         schedstat_set(se->wait_sum, se->wait_sum +
551                         rq_of(cfs_rq)->clock - se->wait_start);
552 #ifdef CONFIG_SCHEDSTATS
553         if (entity_is_task(se)) {
554                 trace_sched_stat_wait(task_of(se),
555                         rq_of(cfs_rq)->clock - se->wait_start);
556         }
557 #endif
558         schedstat_set(se->wait_start, 0);
559 }
560
561 static inline void
562 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
563 {
564         /*
565          * Mark the end of the wait period if dequeueing a
566          * waiting task:
567          */
568         if (se != cfs_rq->curr)
569                 update_stats_wait_end(cfs_rq, se);
570 }
571
572 /*
573  * We are picking a new current task - update its stats:
574  */
575 static inline void
576 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
577 {
578         /*
579          * We are starting a new run period:
580          */
581         se->exec_start = rq_of(cfs_rq)->clock;
582 }
583
584 /**************************************************
585  * Scheduling class queueing methods:
586  */
587
588 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
589 static void
590 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
591 {
592         cfs_rq->task_weight += weight;
593 }
594 #else
595 static inline void
596 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
597 {
598 }
599 #endif
600
601 static void
602 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
603 {
604         update_load_add(&cfs_rq->load, se->load.weight);
605         if (!parent_entity(se))
606                 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
607         if (entity_is_task(se)) {
608                 add_cfs_task_weight(cfs_rq, se->load.weight);
609                 list_add(&se->group_node, &cfs_rq->tasks);
610         }
611         cfs_rq->nr_running++;
612         se->on_rq = 1;
613 }
614
615 static void
616 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
617 {
618         update_load_sub(&cfs_rq->load, se->load.weight);
619         if (!parent_entity(se))
620                 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
621         if (entity_is_task(se)) {
622                 add_cfs_task_weight(cfs_rq, -se->load.weight);
623                 list_del_init(&se->group_node);
624         }
625         cfs_rq->nr_running--;
626         se->on_rq = 0;
627 }
628
629 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
630 {
631 #ifdef CONFIG_SCHEDSTATS
632         struct task_struct *tsk = NULL;
633
634         if (entity_is_task(se))
635                 tsk = task_of(se);
636
637         if (se->sleep_start) {
638                 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
639
640                 if ((s64)delta < 0)
641                         delta = 0;
642
643                 if (unlikely(delta > se->sleep_max))
644                         se->sleep_max = delta;
645
646                 se->sleep_start = 0;
647                 se->sum_sleep_runtime += delta;
648
649                 if (tsk) {
650                         account_scheduler_latency(tsk, delta >> 10, 1);
651                         trace_sched_stat_sleep(tsk, delta);
652                 }
653         }
654         if (se->block_start) {
655                 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
656
657                 if ((s64)delta < 0)
658                         delta = 0;
659
660                 if (unlikely(delta > se->block_max))
661                         se->block_max = delta;
662
663                 se->block_start = 0;
664                 se->sum_sleep_runtime += delta;
665
666                 if (tsk) {
667                         if (tsk->in_iowait) {
668                                 se->iowait_sum += delta;
669                                 se->iowait_count++;
670                                 trace_sched_stat_iowait(tsk, delta);
671                         }
672
673                         /*
674                          * Blocking time is in units of nanosecs, so shift by
675                          * 20 to get a milliseconds-range estimation of the
676                          * amount of time that the task spent sleeping:
677                          */
678                         if (unlikely(prof_on == SLEEP_PROFILING)) {
679                                 profile_hits(SLEEP_PROFILING,
680                                                 (void *)get_wchan(tsk),
681                                                 delta >> 20);
682                         }
683                         account_scheduler_latency(tsk, delta >> 10, 0);
684                 }
685         }
686 #endif
687 }
688
689 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
690 {
691 #ifdef CONFIG_SCHED_DEBUG
692         s64 d = se->vruntime - cfs_rq->min_vruntime;
693
694         if (d < 0)
695                 d = -d;
696
697         if (d > 3*sysctl_sched_latency)
698                 schedstat_inc(cfs_rq, nr_spread_over);
699 #endif
700 }
701
702 static void
703 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
704 {
705         u64 vruntime = cfs_rq->min_vruntime;
706
707         /*
708          * The 'current' period is already promised to the current tasks;
709          * however, the extra weight of the new task will slow them down a
710          * little. Place the new task so that it fits in the slot that
711          * stays open at the end.
712          */
713         if (initial && sched_feat(START_DEBIT))
714                 vruntime += sched_vslice(cfs_rq, se);
715
716         /* sleeps up to a single latency don't count. */
717         if (!initial && sched_feat(FAIR_SLEEPERS)) {
718                 unsigned long thresh = sysctl_sched_latency;
719
720                 /*
721                  * Convert the sleeper threshold into virtual time.
722                  * SCHED_IDLE is a special sub-class.  We care about
723                  * fairness only relative to other SCHED_IDLE tasks,
724                  * all of which have the same weight.
725                  */
726                 if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
727                                  task_of(se)->policy != SCHED_IDLE))
728                         thresh = calc_delta_fair(thresh, se);
729
730                 /*
731                  * Halve their sleep time's effect, to allow
732                  * for a gentler effect of sleepers:
733                  */
734                 if (sched_feat(GENTLE_FAIR_SLEEPERS))
735                         thresh >>= 1;
736
737                 vruntime -= thresh;
738         }
739
740         /* ensure we never gain time by being placed backwards. */
741         vruntime = max_vruntime(se->vruntime, vruntime);
742
743         se->vruntime = vruntime;
744 }
745
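/*
 * Rough placement example under the defaults above: a newly forked task
 * (initial, with START_DEBIT) starts at min_vruntime plus one vslice, i.e.
 * at the back of the current period, so it cannot immediately preempt the
 * tasks already promised this period. With FAIR_SLEEPERS enabled, a waking
 * sleeper is credited at most about one scheduling latency (halved to
 * ~2.5ms when GENTLE_FAIR_SLEEPERS is set), and the final max_vruntime()
 * keeps the entity from ending up behind the vruntime it already had, so a
 * brief sleep can never gain it time.
 */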
746 static void
747 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
748 {
749         /*
750          * Update run-time statistics of the 'current'.
751          */
752         update_curr(cfs_rq);
753         account_entity_enqueue(cfs_rq, se);
754
755         if (wakeup) {
756                 place_entity(cfs_rq, se, 0);
757                 enqueue_sleeper(cfs_rq, se);
758         }
759
760         update_stats_enqueue(cfs_rq, se);
761         check_spread(cfs_rq, se);
762         if (se != cfs_rq->curr)
763                 __enqueue_entity(cfs_rq, se);
764 }
765
766 static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
767 {
768         if (!se || cfs_rq->last == se)
769                 cfs_rq->last = NULL;
770
771         if (!se || cfs_rq->next == se)
772                 cfs_rq->next = NULL;
773 }
774
775 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
776 {
777         for_each_sched_entity(se)
778                 __clear_buddies(cfs_rq_of(se), se);
779 }
780
781 static void
782 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
783 {
784         /*
785          * Update run-time statistics of the 'current'.
786          */
787         update_curr(cfs_rq);
788
789         update_stats_dequeue(cfs_rq, se);
790         if (sleep) {
791 #ifdef CONFIG_SCHEDSTATS
792                 if (entity_is_task(se)) {
793                         struct task_struct *tsk = task_of(se);
794
795                         if (tsk->state & TASK_INTERRUPTIBLE)
796                                 se->sleep_start = rq_of(cfs_rq)->clock;
797                         if (tsk->state & TASK_UNINTERRUPTIBLE)
798                                 se->block_start = rq_of(cfs_rq)->clock;
799                 }
800 #endif
801         }
802
803         clear_buddies(cfs_rq, se);
804
805         if (se != cfs_rq->curr)
806                 __dequeue_entity(cfs_rq, se);
807         account_entity_dequeue(cfs_rq, se);
808         update_min_vruntime(cfs_rq);
809 }
810
811 /*
812  * Preempt the current task with a newly woken task if needed:
813  */
814 static void
815 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
816 {
817         unsigned long ideal_runtime, delta_exec;
818
819         ideal_runtime = sched_slice(cfs_rq, curr);
820         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
821         if (delta_exec > ideal_runtime) {
822                 resched_task(rq_of(cfs_rq)->curr);
823                 /*
824                  * The current task ran long enough, ensure it doesn't get
825                  * re-elected due to buddy favours.
826                  */
827                 clear_buddies(cfs_rq, curr);
828                 return;
829         }
830
831         /*
832          * Ensure that a task that missed wakeup preemption by a
833          * narrow margin doesn't have to wait for a full slice.
834          * This also mitigates buddy induced latencies under load.
835          */
836         if (!sched_feat(WAKEUP_PREEMPT))
837                 return;
838
839         if (delta_exec < sysctl_sched_min_granularity)
840                 return;
841
842         if (cfs_rq->nr_running > 1) {
843                 struct sched_entity *se = __pick_next_entity(cfs_rq);
844                 s64 delta = curr->vruntime - se->vruntime;
845
846                 if (delta > ideal_runtime)
847                         resched_task(rq_of(cfs_rq)->curr);
848         }
849 }
850
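/*
 * In short: the tick compares how long curr has run since it was picked
 * (sum_exec_runtime - prev_sum_exec_runtime, the snapshot taken in
 * set_next_entity() below) against its sched_slice(). With the ~1.67ms
 * slice from the example above, the lighter task is rescheduled at the
 * first tick after it has used up that slice, unless wakeup preemption
 * removes it from the CPU earlier.
 */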
851 static void
852 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
853 {
854         /* 'current' is not kept within the tree. */
855         if (se->on_rq) {
856                 /*
857                  * Any task has to be enqueued before it gets to execute on
858                  * a CPU. So account for the time it spent waiting on the
859                  * runqueue.
860                  */
861                 update_stats_wait_end(cfs_rq, se);
862                 __dequeue_entity(cfs_rq, se);
863         }
864
865         update_stats_curr_start(cfs_rq, se);
866         cfs_rq->curr = se;
867 #ifdef CONFIG_SCHEDSTATS
868         /*
869          * Track our maximum slice length, if the CPU's load is at
870          * least twice that of our own weight (i.e. don't track it
871          * when there are only lesser-weight tasks around):
872          */
873         if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
874                 se->slice_max = max(se->slice_max,
875                         se->sum_exec_runtime - se->prev_sum_exec_runtime);
876         }
877 #endif
878         se->prev_sum_exec_runtime = se->sum_exec_runtime;
879 }
880
881 static int
882 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
883
884 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
885 {
886         struct sched_entity *se = __pick_next_entity(cfs_rq);
887         struct sched_entity *left = se;
888
889         if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
890                 se = cfs_rq->next;
891
892         /*
893          * Prefer last buddy, try to return the CPU to a preempted task.
894          */
895         if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
896                 se = cfs_rq->last;
897
898         clear_buddies(cfs_rq, se);
899
900         return se;
901 }
902
903 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
904 {
905         /*
906          * If still on the runqueue then deactivate_task()
907          * was not called and update_curr() has to be done:
908          */
909         if (prev->on_rq)
910                 update_curr(cfs_rq);
911
912         check_spread(cfs_rq, prev);
913         if (prev->on_rq) {
914                 update_stats_wait_start(cfs_rq, prev);
915                 /* Put 'current' back into the tree. */
916                 __enqueue_entity(cfs_rq, prev);
917         }
918         cfs_rq->curr = NULL;
919 }
920
921 static void
922 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
923 {
924         /*
925          * Update run-time statistics of the 'current'.
926          */
927         update_curr(cfs_rq);
928
929 #ifdef CONFIG_SCHED_HRTICK
930         /*
931          * queued ticks are scheduled to match the slice, so don't bother
932          * validating it and just reschedule.
933          */
934         if (queued) {
935                 resched_task(rq_of(cfs_rq)->curr);
936                 return;
937         }
938         /*
939          * don't let the period tick interfere with the hrtick preemption
940          */
941         if (!sched_feat(DOUBLE_TICK) &&
942                         hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
943                 return;
944 #endif
945
946         if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
947                 check_preempt_tick(cfs_rq, curr);
948 }
949
950 /**************************************************
951  * CFS operations on tasks:
952  */
953
954 #ifdef CONFIG_SCHED_HRTICK
955 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
956 {
957         struct sched_entity *se = &p->se;
958         struct cfs_rq *cfs_rq = cfs_rq_of(se);
959
960         WARN_ON(task_rq(p) != rq);
961
962         if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
963                 u64 slice = sched_slice(cfs_rq, se);
964                 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
965                 s64 delta = slice - ran;
966
967                 if (delta < 0) {
968                         if (rq->curr == p)
969                                 resched_task(p);
970                         return;
971                 }
972
973                 /*
974                  * Don't schedule slices shorter than 10000ns, that just
975                  * doesn't make sense. Rely on vruntime for fairness.
976                  */
977                 if (rq->curr != p)
978                         delta = max_t(s64, 10000LL, delta);
979
980                 hrtick_start(rq, delta);
981         }
982 }
983
984 /*
985  * called from enqueue/dequeue and updates the hrtick when the
986  * current task is from our class and nr_running is low enough
987  * to matter.
988  */
989 static void hrtick_update(struct rq *rq)
990 {
991         struct task_struct *curr = rq->curr;
992
993         if (curr->sched_class != &fair_sched_class)
994                 return;
995
996         if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
997                 hrtick_start_fair(rq, curr);
998 }
999 #else /* !CONFIG_SCHED_HRTICK */
1000 static inline void
1001 hrtick_start_fair(struct rq *rq, struct task_struct *p)
1002 {
1003 }
1004
1005 static inline void hrtick_update(struct rq *rq)
1006 {
1007 }
1008 #endif
1009
1010 /*
1011  * The enqueue_task method is called before nr_running is
1012  * increased. Here we update the fair scheduling stats and
1013  * then put the task into the rbtree:
1014  */
1015 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
1016 {
1017         struct cfs_rq *cfs_rq;
1018         struct sched_entity *se = &p->se;
1019
1020         for_each_sched_entity(se) {
1021                 if (se->on_rq)
1022                         break;
1023                 cfs_rq = cfs_rq_of(se);
1024                 enqueue_entity(cfs_rq, se, wakeup);
1025                 wakeup = 1;
1026         }
1027
1028         hrtick_update(rq);
1029 }
1030
1031 /*
1032  * The dequeue_task method is called before nr_running is
1033  * decreased. We remove the task from the rbtree and
1034  * update the fair scheduling stats:
1035  */
1036 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
1037 {
1038         struct cfs_rq *cfs_rq;
1039         struct sched_entity *se = &p->se;
1040
1041         for_each_sched_entity(se) {
1042                 cfs_rq = cfs_rq_of(se);
1043                 dequeue_entity(cfs_rq, se, sleep);
1044                 /* Don't dequeue parent if it has other entities besides us */
1045                 if (cfs_rq->load.weight)
1046                         break;
1047                 sleep = 1;
1048         }
1049
1050         hrtick_update(rq);
1051 }
1052
1053 /*
1054  * sched_yield() support is very simple - we dequeue and enqueue.
1055  *
1056  * If compat_yield is turned on then we requeue to the end of the tree.
1057  */
1058 static void yield_task_fair(struct rq *rq)
1059 {
1060         struct task_struct *curr = rq->curr;
1061         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1062         struct sched_entity *rightmost, *se = &curr->se;
1063
1064         /*
1065          * Are we the only task in the tree?
1066          */
1067         if (unlikely(cfs_rq->nr_running == 1))
1068                 return;
1069
1070         clear_buddies(cfs_rq, se);
1071
1072         if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
1073                 update_rq_clock(rq);
1074                 /*
1075                  * Update run-time statistics of the 'current'.
1076                  */
1077                 update_curr(cfs_rq);
1078
1079                 return;
1080         }
1081         /*
1082          * Find the rightmost entry in the rbtree:
1083          */
1084         rightmost = __pick_last_entity(cfs_rq);
1085         /*
1086          * Already in the rightmost position?
1087          */
1088         if (unlikely(!rightmost || entity_before(rightmost, se)))
1089                 return;
1090
1091         /*
1092          * Minimally necessary key value to be last in the tree:
1093          * Upon rescheduling, sched_class::put_prev_task() will place
1094          * 'current' within the tree based on its new key value.
1095          */
1096         se->vruntime = rightmost->vruntime + 1;
1097 }
1098
1099 #ifdef CONFIG_SMP
1100
1101 #ifdef CONFIG_FAIR_GROUP_SCHED
1102 /*
1103  * effective_load() calculates the load change as seen from the root_task_group
1104  *
1105  * Adding load to a group doesn't make a group heavier, but can cause movement
1106  * of group shares between cpus. Assuming the shares were perfectly aligned one
1107  * can calculate the shift in shares.
1108  *
1109  * The problem is that perfectly aligning the shares is rather expensive, hence
1110  * we try to avoid doing that too often - see update_shares(), which ratelimits
1111  * this change.
1112  *
1113  * We compensate this by not only taking the current delta into account, but
1114  * also considering the delta between when the shares were last adjusted and
1115  * now.
1116  *
1117  * We still saw a performance dip; some tracing showed us that between
1118  * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
1119  * significantly. Therefore try to bias the error in the direction of failing
1120  * the affine wakeup.
1121  *
1122  */
1123 static long effective_load(struct task_group *tg, int cpu,
1124                 long wl, long wg)
1125 {
1126         struct sched_entity *se = tg->se[cpu];
1127
1128         if (!tg->parent)
1129                 return wl;
1130
1131         /*
1132          * By not taking the decrease of shares on the other cpu into
1133          * account our error leans towards reducing the affine wakeups.
1134          */
1135         if (!wl && sched_feat(ASYM_EFF_LOAD))
1136                 return wl;
1137
1138         for_each_sched_entity(se) {
1139                 long S, rw, s, a, b;
1140                 long more_w;
1141
1142                 /*
1143                  * Instead of using this increment, also add the difference
1144                  * between when the shares were last updated and now.
1145                  */
1146                 more_w = se->my_q->load.weight - se->my_q->rq_weight;
1147                 wl += more_w;
1148                 wg += more_w;
1149
1150                 S = se->my_q->tg->shares;
1151                 s = se->my_q->shares;
1152                 rw = se->my_q->rq_weight;
1153
1154                 a = S*(rw + wl);
1155                 b = S*rw + s*wg;
1156
1157                 wl = s*(a-b);
1158
1159                 if (likely(b))
1160                         wl /= b;
1161
1162                 /*
1163                  * Assume the group is already running and will
1164                  * thus already be accounted for in the weight.
1165                  *
1166                  * That is, moving shares between CPUs, does not
1167                  * alter the group weight.
1168                  */
1169                 wg = 0;
1170         }
1171
1172         return wl;
1173 }
1174
1175 #else
1176
1177 static inline unsigned long effective_load(struct task_group *tg, int cpu,
1178                 unsigned long wl, unsigned long wg)
1179 {
1180         return wl;
1181 }
1182
1183 #endif
1184
1185 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1186 {
1187         struct task_struct *curr = current;
1188         unsigned long this_load, load;
1189         int idx, this_cpu, prev_cpu;
1190         unsigned long tl_per_task;
1191         unsigned int imbalance;
1192         struct task_group *tg;
1193         unsigned long weight;
1194         int balanced;
1195
1196         idx       = sd->wake_idx;
1197         this_cpu  = smp_processor_id();
1198         prev_cpu  = task_cpu(p);
1199         load      = source_load(prev_cpu, idx);
1200         this_load = target_load(this_cpu, idx);
1201
1202         if (sync) {
1203                if (sched_feat(SYNC_LESS) &&
1204                    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1205                     p->se.avg_overlap > sysctl_sched_migration_cost))
1206                        sync = 0;
1207         } else {
1208                 if (sched_feat(SYNC_MORE) &&
1209                     (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1210                      p->se.avg_overlap < sysctl_sched_migration_cost))
1211                         sync = 1;
1212         }
1213
1214         /*
1215          * If sync wakeup then subtract the (maximum possible)
1216          * effect of the currently running task from the load
1217          * of the current CPU:
1218          */
1219         if (sync) {
1220                 tg = task_group(current);
1221                 weight = current->se.load.weight;
1222
1223                 this_load += effective_load(tg, this_cpu, -weight, -weight);
1224                 load += effective_load(tg, prev_cpu, 0, -weight);
1225         }
1226
1227         tg = task_group(p);
1228         weight = p->se.load.weight;
1229
1230         imbalance = 100 + (sd->imbalance_pct - 100) / 2;
1231
1232         /*
1233          * In low-load situations, where prev_cpu is idle and this_cpu is idle
1234          * due to the sync cause above having dropped this_load to 0, we'll
1235          * always have an imbalance, but there's really nothing you can do
1236          * about that, so that's good too.
1237          *
1238  * Otherwise check if either cpu is near enough in load to allow this
1239          * task to be woken on this_cpu.
1240          */
1241         balanced = !this_load ||
1242                 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
1243                 imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1244
1245         /*
1246          * If the currently running task will sleep within
1247          * a reasonable amount of time then attract this newly
1248          * woken task:
1249          */
1250         if (sync && balanced)
1251                 return 1;
1252
1253         schedstat_inc(p, se.nr_wakeups_affine_attempts);
1254         tl_per_task = cpu_avg_load_per_task(this_cpu);
1255
1256         if (balanced ||
1257             (this_load <= load &&
1258              this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
1259                 /*
1260                  * This domain has SD_WAKE_AFFINE and
1261                  * p is cache cold in this domain, and
1262                  * there is no bad imbalance.
1263                  */
1264                 schedstat_inc(sd, ttwu_move_affine);
1265                 schedstat_inc(p, se.nr_wakeups_affine);
1266
1267                 return 1;
1268         }
1269         return 0;
1270 }
1271
1272 /*
1273  * find_idlest_group finds and returns the least busy CPU group within the
1274  * domain.
1275  */
1276 static struct sched_group *
1277 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1278                   int this_cpu, int load_idx)
1279 {
1280         struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1281         unsigned long min_load = ULONG_MAX, this_load = 0;
1282         int imbalance = 100 + (sd->imbalance_pct-100)/2;
1283
1284         do {
1285                 unsigned long load, avg_load;
1286                 int local_group;
1287                 int i;
1288
1289                 /* Skip over this group if it has no CPUs allowed */
1290                 if (!cpumask_intersects(sched_group_cpus(group),
1291                                         &p->cpus_allowed))
1292                         continue;
1293
1294                 local_group = cpumask_test_cpu(this_cpu,
1295                                                sched_group_cpus(group));
1296
1297                 /* Tally up the load of all CPUs in the group */
1298                 avg_load = 0;
1299
1300                 for_each_cpu(i, sched_group_cpus(group)) {
1301                         /* Bias balancing toward cpus of our domain */
1302                         if (local_group)
1303                                 load = source_load(i, load_idx);
1304                         else
1305                                 load = target_load(i, load_idx);
1306
1307                         avg_load += load;
1308                 }
1309
1310                 /* Adjust by relative CPU power of the group */
1311                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1312
1313                 if (local_group) {
1314                         this_load = avg_load;
1315                         this = group;
1316                 } else if (avg_load < min_load) {
1317                         min_load = avg_load;
1318                         idlest = group;
1319                 }
1320         } while (group = group->next, group != sd->groups);
1321
1322         if (!idlest || 100*this_load < imbalance*min_load)
1323                 return NULL;
1324         return idlest;
1325 }
1326
1327 /*
1328  * find_idlest_cpu - find the idlest cpu among the cpus in group.
1329  */
1330 static int
1331 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1332 {
1333         unsigned long load, min_load = ULONG_MAX;
1334         int idlest = -1;
1335         int i;
1336
1337         /* Traverse only the allowed CPUs */
1338         for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1339                 load = weighted_cpuload(i);
1340
1341                 if (load < min_load || (load == min_load && i == this_cpu)) {
1342                         min_load = load;
1343                         idlest = i;
1344                 }
1345         }
1346
1347         return idlest;
1348 }
1349
1350 /*
1351  * select_task_rq_fair: balance the current task (running on cpu) in domains
1352  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
1353  * SD_BALANCE_FORK and SD_BALANCE_EXEC.
1354  *
1355  * Balance, i.e. select the least loaded group.
1356  *
1357  * Returns the target CPU number, or the same CPU if no balancing is needed.
1358  *
1359  * preempt must be disabled.
1360  */
1361 static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1362 {
1363         struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1364         int cpu = smp_processor_id();
1365         int prev_cpu = task_cpu(p);
1366         int new_cpu = cpu;
1367         int want_affine = 0;
1368         int want_sd = 1;
1369         int sync = wake_flags & WF_SYNC;
1370
1371         if (sd_flag & SD_BALANCE_WAKE) {
1372                 if (sched_feat(AFFINE_WAKEUPS) &&
1373                     cpumask_test_cpu(cpu, &p->cpus_allowed))
1374                         want_affine = 1;
1375                 new_cpu = prev_cpu;
1376         }
1377
1378         rcu_read_lock();
1379         for_each_domain(cpu, tmp) {
1380                 if (!(tmp->flags & SD_LOAD_BALANCE))
1381                         continue;
1382
1383                 /*
1384                  * If power savings logic is enabled for a domain, see if we
1385                  * are not overloaded, if so, don't balance wider.
1386                  */
1387                 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
1388                         unsigned long power = 0;
1389                         unsigned long nr_running = 0;
1390                         unsigned long capacity;
1391                         int i;
1392
1393                         for_each_cpu(i, sched_domain_span(tmp)) {
1394                                 power += power_of(i);
1395                                 nr_running += cpu_rq(i)->cfs.nr_running;
1396                         }
1397
1398                         capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
1399
1400                         if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1401                                 nr_running /= 2;
1402
1403                         if (nr_running < capacity)
1404                                 want_sd = 0;
1405                 }
1406
1407                 if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
1408                         int candidate = -1, i;
1409
1410                         if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
1411                                 candidate = cpu;
1412
1413                         /*
1414                          * Check for an idle shared cache.
1415                          */
1416                         if (tmp->flags & SD_PREFER_SIBLING) {
1417                                 if (candidate == cpu) {
1418                                         if (!cpu_rq(prev_cpu)->cfs.nr_running)
1419                                                 candidate = prev_cpu;
1420                                 }
1421
1422                                 if (candidate == -1 || candidate == cpu) {
1423                                         for_each_cpu(i, sched_domain_span(tmp)) {
1424                                                 if (!cpumask_test_cpu(i, &p->cpus_allowed))
1425                                                         continue;
1426                                                 if (!cpu_rq(i)->cfs.nr_running) {
1427                                                         candidate = i;
1428                                                         break;
1429                                                 }
1430                                         }
1431                                 }
1432                         }
1433
1434                         if (candidate >= 0) {
1435                                 affine_sd = tmp;
1436                                 want_affine = 0;
1437                                 cpu = candidate;
1438                         }
1439                 }
1440
1441                 if (!want_sd && !want_affine)
1442                         break;
1443
1444                 if (!(tmp->flags & sd_flag))
1445                         continue;
1446
1447                 if (want_sd)
1448                         sd = tmp;
1449         }
1450
1451         if (sched_feat(LB_SHARES_UPDATE)) {
1452                 /*
1453                  * Pick the largest domain to update shares over
1454                  */
1455                 tmp = sd;
1456                 if (affine_sd && (!tmp ||
1457                                   cpumask_weight(sched_domain_span(affine_sd)) >
1458                                   cpumask_weight(sched_domain_span(sd))))
1459                         tmp = affine_sd;
1460
1461                 if (tmp)
1462                         update_shares(tmp);
1463         }
1464
1465         if (affine_sd && wake_affine(affine_sd, p, sync)) {
1466                 new_cpu = cpu;
1467                 goto out;
1468         }
1469
1470         while (sd) {
1471                 int load_idx = sd->forkexec_idx;
1472                 struct sched_group *group;
1473                 int weight;
1474
1475                 if (!(sd->flags & sd_flag)) {
1476                         sd = sd->child;
1477                         continue;
1478                 }
1479
1480                 if (sd_flag & SD_BALANCE_WAKE)
1481                         load_idx = sd->wake_idx;
1482
1483                 group = find_idlest_group(sd, p, cpu, load_idx);
1484                 if (!group) {
1485                         sd = sd->child;
1486                         continue;
1487                 }
1488
1489                 new_cpu = find_idlest_cpu(group, p, cpu);
1490                 if (new_cpu == -1 || new_cpu == cpu) {
1491                         /* Now try balancing at a lower domain level of cpu */
1492                         sd = sd->child;
1493                         continue;
1494                 }
1495
1496                 /* Now try balancing at a lower domain level of new_cpu */
1497                 cpu = new_cpu;
1498                 weight = cpumask_weight(sched_domain_span(sd));
1499                 sd = NULL;
1500                 for_each_domain(cpu, tmp) {
1501                         if (weight <= cpumask_weight(sched_domain_span(tmp)))
1502                                 break;
1503                         if (tmp->flags & sd_flag)
1504                                 sd = tmp;
1505                 }
1506                 /* while loop will break here if sd == NULL */
1507         }
1508
1509 out:
1510         rcu_read_unlock();
1511         return new_cpu;
1512 }
1513 #endif /* CONFIG_SMP */
1514
1515 /*
1516  * Adaptive granularity
1517  *
1518  * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1519  * with the limit of wakeup_gran -- when it never does a wakeup.
1520  *
1521  * So the smaller avg_wakeup is the faster we want this task to preempt,
1522  * but we don't want to treat the preemptee unfairly and therefore allow it
1523  * to run for at least the amount of time we'd like to run.
1524  *
1525  * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1526  *
1527  * NOTE: we use *nr_running to scale with load; this nicely matches the
1528  *       degrading latency on load.
1529  */
1530 static unsigned long
1531 adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1532 {
1533         u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1534         u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1535         u64 gran = 0;
1536
1537         if (this_run < expected_wakeup)
1538                 gran = expected_wakeup - this_run;
1539
1540         return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1541 }
1542
1543 static unsigned long
1544 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1545 {
1546         unsigned long gran = sysctl_sched_wakeup_granularity;
1547
1548         if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1549                 gran = adaptive_gran(curr, se);
1550
1551         /*
1552          * Since it is curr that is running now, convert the gran from
1553          * real-time to virtual-time in its units.
1554          */
1555         if (sched_feat(ASYM_GRAN)) {
1556                 /*
1557                  * By using 'se' instead of 'curr' we penalize light tasks, so
1558                  * they get preempted easier. That is, if 'se' < 'curr' then
1559                  * the resulting gran will be larger, therefore penalizing the
1560                  * lighter task; if, on the other hand, 'se' > 'curr' then the
1561                  * resulting gran will be smaller, again penalizing the lighter task.
1562                  *
1563                  * This is especially important for buddies when the leftmost
1564                  * task is higher priority than the buddy.
1565                  */
1566                 if (unlikely(se->load.weight != NICE_0_LOAD))
1567                         gran = calc_delta_fair(gran, se);
1568         } else {
1569                 if (unlikely(curr->load.weight != NICE_0_LOAD))
1570                         gran = calc_delta_fair(gran, curr);
1571         }
1572
1573         return gran;
1574 }
1575
1576 /*
1577  * Should 'se' preempt 'curr'.
1578  *
1579  *             |s1
1580  *        |s2
1581  *   |s3
1582  *         g
1583  *      |<--->|c
1584  *
1585  *  w(c, s1) = -1
1586  *  w(c, s2) =  0
1587  *  w(c, s3) =  1
1588  *
1589  */
1590 static int
1591 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1592 {
1593         s64 gran, vdiff = curr->vruntime - se->vruntime;
1594
1595         if (vdiff <= 0)
1596                 return -1;
1597
1598         gran = wakeup_gran(curr, se);
1599         if (vdiff > gran)
1600                 return 1;
1601
1602         return 0;
1603 }
1604
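/*
 * Numeric reading of the diagram above, assuming the default 1ms wakeup
 * granularity (again before CPU-count scaling): if the waking se's vruntime
 * is not smaller than curr's, the function returns -1; if se leads curr by
 * only 0.5ms, less than the granularity once wakeup_gran() has converted it
 * to virtual time, it returns 0; if se leads by 2ms it returns 1 and the
 * caller preempts.
 */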
1605 static void set_last_buddy(struct sched_entity *se)
1606 {
1607         if (likely(task_of(se)->policy != SCHED_IDLE)) {
1608                 for_each_sched_entity(se)
1609                         cfs_rq_of(se)->last = se;
1610         }
1611 }
1612
1613 static void set_next_buddy(struct sched_entity *se)
1614 {
1615         if (likely(task_of(se)->policy != SCHED_IDLE)) {
1616                 for_each_sched_entity(se)
1617                         cfs_rq_of(se)->next = se;
1618         }
1619 }
1620
1621 /*
1622  * Preempt the current task with a newly woken task if needed:
1623  */
1624 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1625 {
1626         struct task_struct *curr = rq->curr;
1627         struct sched_entity *se = &curr->se, *pse = &p->se;
1628         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1629         int sync = wake_flags & WF_SYNC;
1630         int scale = cfs_rq->nr_running >= sched_nr_latency;
1631
1632         update_curr(cfs_rq);
1633
1634         if (unlikely(rt_prio(p->prio))) {
1635                 resched_task(curr);
1636                 return;
1637         }
1638
1639         if (unlikely(p->sched_class != &fair_sched_class))
1640                 return;
1641
1642         if (unlikely(se == pse))
1643                 return;
1644
1645         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
1646                 set_next_buddy(pse);
1647
1648         /*
1649          * We can come here with TIF_NEED_RESCHED already set from the new
1650          * task wakeup path.
1651          */
1652         if (test_tsk_need_resched(curr))
1653                 return;
1654
1655         /*
1656          * Batch and idle tasks do not preempt (their preemption is driven by
1657          * the tick):
1658          */
1659         if (unlikely(p->policy != SCHED_NORMAL))
1660                 return;
1661
1662         /* Idle tasks are by definition preempted by everybody. */
1663         if (unlikely(curr->policy == SCHED_IDLE)) {
1664                 resched_task(curr);
1665                 return;
1666         }
1667
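        /*
         * avg_overlap (maintained by the core scheduler) tracks how long a
         * task tends to keep running after waking someone else up.  When
         * both waker and wakee stay below sysctl_sched_migration_cost the
         * pair looks like a synchronous producer/consumer, so preempting
         * curr right away is usually the cheaper choice.
         */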
1668         if ((sched_feat(WAKEUP_SYNC) && sync) ||
1669             (sched_feat(WAKEUP_OVERLAP) &&
1670              (se->avg_overlap < sysctl_sched_migration_cost &&
1671               pse->avg_overlap < sysctl_sched_migration_cost))) {
1672                 resched_task(curr);
1673                 return;
1674         }
1675
1676         if (sched_feat(WAKEUP_RUNNING)) {
1677                 if (pse->avg_running < se->avg_running) {
1678                         set_next_buddy(pse);
1679                         resched_task(curr);
1680                         return;
1681                 }
1682         }
1683
1684         if (!sched_feat(WAKEUP_PREEMPT))
1685                 return;
1686
1687         find_matching_se(&se, &pse);
1688
1689         BUG_ON(!pse);
1690
1691         if (wakeup_preempt_entity(se, pse) == 1) {
1692                 resched_task(curr);
1693                 /*
1694                  * Only set the backward buddy when the current task is still
1695                  * on the rq. This can happen when a wakeup gets interleaved
1696                  * with schedule on the ->pre_schedule() or idle_balance()
1697          * point, either of which can drop the rq lock.
1698                  *
1699                  * Also, during early boot the idle thread is in the fair class,
1700          * for obvious reasons it's a bad idea to schedule back to it.
1701                  */
1702                 if (unlikely(!se->on_rq || curr == rq->idle))
1703                         return;
1704                 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1705                         set_last_buddy(se);
1706         }
1707 }
1708
1709 static struct task_struct *pick_next_task_fair(struct rq *rq)
1710 {
1711         struct task_struct *p;
1712         struct cfs_rq *cfs_rq = &rq->cfs;
1713         struct sched_entity *se;
1714
1715         if (unlikely(!cfs_rq->nr_running))
1716                 return NULL;
1717
1718         do {
1719                 se = pick_next_entity(cfs_rq);
1720                 set_next_entity(cfs_rq, se);
1721                 cfs_rq = group_cfs_rq(se);
1722         } while (cfs_rq);
1723
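        /*
         * group_cfs_rq(se) returned NULL above, so 'se' owns no runqueue
         * of its own and is therefore a task entity; task_of() is safe.
         * Each set_next_entity() call along the way also updated the
         * per-level cfs_rq->curr.
         */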
1724         p = task_of(se);
1725         hrtick_start_fair(rq, p);
1726
1727         return p;
1728 }
1729
1730 /*
1731  * Account for a descheduled task:
1732  */
1733 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1734 {
1735         struct sched_entity *se = &prev->se;
1736         struct cfs_rq *cfs_rq;
1737
1738         for_each_sched_entity(se) {
1739                 cfs_rq = cfs_rq_of(se);
1740                 put_prev_entity(cfs_rq, se);
1741         }
1742 }
1743
1744 #ifdef CONFIG_SMP
1745 /**************************************************
1746  * Fair scheduling class load-balancing methods:
1747  */
1748
1749 /*
1750  * Load-balancing iterator. Note: while the runqueue stays locked
1751  * during the whole iteration, the current task might be
1752  * dequeued so the iterator has to be dequeue-safe. Here we
1753  * achieve that by always pre-iterating before returning
1754  * the current task:
1755  */
1756 static struct task_struct *
1757 __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1758 {
1759         struct task_struct *p = NULL;
1760         struct sched_entity *se;
1761
1762         if (next == &cfs_rq->tasks)
1763                 return NULL;
1764
1765         se = list_entry(next, struct sched_entity, group_node);
1766         p = task_of(se);
1767         cfs_rq->balance_iterator = next->next;
1768
1769         return p;
1770 }
1771
1772 static struct task_struct *load_balance_start_fair(void *arg)
1773 {
1774         struct cfs_rq *cfs_rq = arg;
1775
1776         return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
1777 }
1778
1779 static struct task_struct *load_balance_next_fair(void *arg)
1780 {
1781         struct cfs_rq *cfs_rq = arg;
1782
1783         return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1784 }
1785
1786 static unsigned long
1787 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1788                 unsigned long max_load_move, struct sched_domain *sd,
1789                 enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1790                 struct cfs_rq *cfs_rq)
1791 {
1792         struct rq_iterator cfs_rq_iterator;
1793
1794         cfs_rq_iterator.start = load_balance_start_fair;
1795         cfs_rq_iterator.next = load_balance_next_fair;
1796         cfs_rq_iterator.arg = cfs_rq;
1797
1798         return balance_tasks(this_rq, this_cpu, busiest,
1799                         max_load_move, sd, idle, all_pinned,
1800                         this_best_prio, &cfs_rq_iterator);
1801 }
1802
1803 #ifdef CONFIG_FAIR_GROUP_SCHED
1804 static unsigned long
1805 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1806                   unsigned long max_load_move,
1807                   struct sched_domain *sd, enum cpu_idle_type idle,
1808                   int *all_pinned, int *this_best_prio)
1809 {
1810         long rem_load_move = max_load_move;
1811         int busiest_cpu = cpu_of(busiest);
1812         struct task_group *tg;
1813
1814         rcu_read_lock();
1815         update_h_load(busiest_cpu);
1816
1817         list_for_each_entry_rcu(tg, &task_groups, list) {
1818                 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1819                 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1820                 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1821                 u64 rem_load, moved_load;
1822
1823                 /*
1824                  * empty group
1825                  */
1826                 if (!busiest_cfs_rq->task_weight)
1827                         continue;
1828
1829                 rem_load = (u64)rem_load_move * busiest_weight;
1830                 rem_load = div_u64(rem_load, busiest_h_load + 1);
1831
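                /*
                 * rem_load is now the remaining budget expressed in this
                 * group's local weight units (the +1 only guards against a
                 * zero h_load).  Made-up example: rem_load_move = 1024,
                 * busiest_weight = 2048 and busiest_h_load = 512 give
                 * rem_load ~= 4088; whatever __load_balance_fair() actually
                 * moves is scaled back below by h_load / (weight + 1) before
                 * being subtracted from rem_load_move.
                 */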
1832                 moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1833                                 rem_load, sd, idle, all_pinned, this_best_prio,
1834                                 tg->cfs_rq[busiest_cpu]);
1835
1836                 if (!moved_load)
1837                         continue;
1838
1839                 moved_load *= busiest_h_load;
1840                 moved_load = div_u64(moved_load, busiest_weight + 1);
1841
1842                 rem_load_move -= moved_load;
1843                 if (rem_load_move < 0)
1844                         break;
1845         }
1846         rcu_read_unlock();
1847
1848         return max_load_move - rem_load_move;
1849 }
1850 #else
1851 static unsigned long
1852 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1853                   unsigned long max_load_move,
1854                   struct sched_domain *sd, enum cpu_idle_type idle,
1855                   int *all_pinned, int *this_best_prio)
1856 {
1857         return __load_balance_fair(this_rq, this_cpu, busiest,
1858                         max_load_move, sd, idle, all_pinned,
1859                         this_best_prio, &busiest->cfs);
1860 }
1861 #endif
1862
1863 static int
1864 move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1865                    struct sched_domain *sd, enum cpu_idle_type idle)
1866 {
1867         struct cfs_rq *busy_cfs_rq;
1868         struct rq_iterator cfs_rq_iterator;
1869
1870         cfs_rq_iterator.start = load_balance_start_fair;
1871         cfs_rq_iterator.next = load_balance_next_fair;
1872
1873         for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1874                 /*
1875                  * pass busy_cfs_rq argument into
1876                  * load_balance_[start|next]_fair iterators
1877                  */
1878                 cfs_rq_iterator.arg = busy_cfs_rq;
1879                 if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
1880                                        &cfs_rq_iterator))
1881                     return 1;
1882         }
1883
1884         return 0;
1885 }
1886
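/*
 * When a runqueue goes on- or off-line the number of online CPUs changes,
 * so both hooks below call update_sysctl() to rescale the latency and
 * granularity tunables from their normalized_ defaults for the new CPU
 * count (the exact factor depends on the tunable scaling mode).
 */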
1887 static void rq_online_fair(struct rq *rq)
1888 {
1889         update_sysctl();
1890 }
1891
1892 static void rq_offline_fair(struct rq *rq)
1893 {
1894         update_sysctl();
1895 }
1896
1897 #endif /* CONFIG_SMP */
1898
1899 /*
1900  * scheduler tick hitting a task of our scheduling class:
1901  */
1902 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1903 {
1904         struct cfs_rq *cfs_rq;
1905         struct sched_entity *se = &curr->se;
1906
1907         for_each_sched_entity(se) {
1908                 cfs_rq = cfs_rq_of(se);
1909                 entity_tick(cfs_rq, se, queued);
1910         }
1911 }
1912
1913 /*
1914  * Share the fairness runtime between parent and child, so that the
1915  * total amount of pressure on the CPU stays the same - new tasks
1916  * get a chance to run but frequent forkers are not allowed to
1917  * monopolize the CPU. Note: the parent runqueue is locked,
1918  * the child is not running yet.
1919  */
1920 static void task_new_fair(struct rq *rq, struct task_struct *p)
1921 {
1922         struct cfs_rq *cfs_rq = task_cfs_rq(p);
1923         struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
1924         int this_cpu = smp_processor_id();
1925
1926         sched_info_queued(p);
1927
1928         update_curr(cfs_rq);
1929         if (curr)
1930                 se->vruntime = curr->vruntime;
1931         place_entity(cfs_rq, se, 1);
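        /*
         * Note: place_entity() with initial == 1 normally (i.e. with the
         * START_DEBIT feature left at its default) pushes the child's
         * vruntime one slice to the right of the cfs_rq's min_vruntime,
         * so forking cannot be used to grab extra CPU time.
         */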
1932
1933         /* 'curr' will be NULL if the child belongs to a different group */
1934         if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
1935                         curr && entity_before(curr, se)) {
1936                 /*
1937                  * Upon rescheduling, sched_class::put_prev_task() will place
1938                  * 'current' within the tree based on its new key value.
1939                  */
1940                 swap(curr->vruntime, se->vruntime);
1941                 resched_task(rq->curr);
1942         }
1943
1944         enqueue_task_fair(rq, p, 0);
1945 }
1946
1947 /*
1948  * Priority of the task has changed. Check to see if we preempt
1949  * the current task.
1950  */
1951 static void prio_changed_fair(struct rq *rq, struct task_struct *p,
1952                               int oldprio, int running)
1953 {
1954         /*
1955          * Reschedule if we are currently running on this runqueue and
1956          * our priority decreased, or if we are not currently running on
1957          * this runqueue and our priority is higher than the current task's.
1958          */
1959         if (running) {
1960                 if (p->prio > oldprio)
1961                         resched_task(rq->curr);
1962         } else
1963                 check_preempt_curr(rq, p, 0);
1964 }
1965
1966 /*
1967  * We switched to the sched_fair class.
1968  */
1969 static void switched_to_fair(struct rq *rq, struct task_struct *p,
1970                              int running)
1971 {
1972         /*
1973          * We were most likely switched from sched_rt, so
1974          * kick off the schedule if running, otherwise just see
1975          * if we can still preempt the current task.
1976          */
1977         if (running)
1978                 resched_task(rq->curr);
1979         else
1980                 check_preempt_curr(rq, p, 0);
1981 }
1982
1983 /* Account for a task changing its policy or group.
1984  *
1985  * This routine is mostly called to set cfs_rq->curr field when a task
1986  * migrates between groups/classes.
1987  */
1988 static void set_curr_task_fair(struct rq *rq)
1989 {
1990         struct sched_entity *se = &rq->curr->se;
1991
1992         for_each_sched_entity(se)
1993                 set_next_entity(cfs_rq_of(se), se);
1994 }
1995
1996 #ifdef CONFIG_FAIR_GROUP_SCHED
1997 static void moved_group_fair(struct task_struct *p)
1998 {
1999         struct cfs_rq *cfs_rq = task_cfs_rq(p);
2000
2001         update_curr(cfs_rq);
2002         place_entity(cfs_rq, &p->se, 1);
2003 }
2004 #endif
2005
2006 unsigned int get_rr_interval_fair(struct task_struct *task)
2007 {
2008         struct sched_entity *se = &task->se;
2009         unsigned long flags;
2010         struct rq *rq;
2011         unsigned int rr_interval = 0;
2012
2013         /*
2014          * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
2015          * idle runqueue:
2016          */
2017         rq = task_rq_lock(task, &flags);
2018         if (rq->cfs.load.weight)
2019                 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
2020         task_rq_unlock(rq, &flags);
2021
2022         return rr_interval;
2023 }
2024
2025 /*
2026  * All the scheduling class methods:
2027  */
2028 static const struct sched_class fair_sched_class = {
2029         .next                   = &idle_sched_class,
2030         .enqueue_task           = enqueue_task_fair,
2031         .dequeue_task           = dequeue_task_fair,
2032         .yield_task             = yield_task_fair,
2033
2034         .check_preempt_curr     = check_preempt_wakeup,
2035
2036         .pick_next_task         = pick_next_task_fair,
2037         .put_prev_task          = put_prev_task_fair,
2038
2039 #ifdef CONFIG_SMP
2040         .select_task_rq         = select_task_rq_fair,
2041
2042         .load_balance           = load_balance_fair,
2043         .move_one_task          = move_one_task_fair,
2044         .rq_online              = rq_online_fair,
2045         .rq_offline             = rq_offline_fair,
2046 #endif
2047
2048         .set_curr_task          = set_curr_task_fair,
2049         .task_tick              = task_tick_fair,
2050         .task_new               = task_new_fair,
2051
2052         .prio_changed           = prio_changed_fair,
2053         .switched_to            = switched_to_fair,
2054
2055         .get_rr_interval        = get_rr_interval_fair,
2056
2057 #ifdef CONFIG_FAIR_GROUP_SCHED
2058         .moved_group            = moved_group_fair,
2059 #endif
2060 };
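
/*
 * This table is how the core scheduler reaches the fair class: generic
 * paths such as enqueue_task(), pick_next_task() and scheduler_tick()
 * dispatch through these method pointers, and .next chains on to
 * idle_sched_class so the class walk can fall back to the idle task
 * when no fair entity is runnable.
 */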
2061
2062 #ifdef CONFIG_SCHED_DEBUG
2063 static void print_cfs_stats(struct seq_file *m, int cpu)
2064 {
2065         struct cfs_rq *cfs_rq;
2066
2067         rcu_read_lock();
2068         for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
2069                 print_cfs_rq(m, cpu, cfs_rq);
2070         rcu_read_unlock();
2071 }
2072 #endif