kernel/sched/rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

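/*
 * Replenishment timer for RT bandwidth control: every rt_period it
 * calls do_sched_rt_period_timer() with the number of periods that
 * elapsed, refilling per-rq runtime and unthrottling runqueues, and
 * stops once all serviced runqueues report idle.
 */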
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

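/*
 * Lazily arm the period timer: only when bandwidth enforcement is
 * enabled and this rt_bandwidth has a finite runtime. The unlocked
 * hrtimer_active() test is a cheap fast path; the timer is actually
 * started under rt_runtime_lock.
 */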
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

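/*
 * Wire one per-CPU rt_rq and its scheduling entity into a task group:
 * rt_rq holds the group's queued RT tasks on @cpu, while rt_se is how
 * the group itself is enqueued on its parent's rt_rq (or on the root
 * rt_rq when there is no parent).
 */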
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

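/*
 * Allocate the per-CPU rt_rq/rt_se pointer arrays for a new task
 * group and set up each CPU's runqueue/entity pair. Returns 1 on
 * success, 0 on allocation failure.
 */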
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq, cpu_rq(i));
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

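/*
 * An rq counts as "RT overloaded" when it has more than one RT task
 * queued and at least one of them may migrate, i.e. other CPUs could
 * usefully pull from it. Keep rd->rto_mask and the overloaded flag in
 * sync with that condition.
 */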
static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

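/*
 * Pushable tasks live on a priority-ordered plist so that
 * pick_next_pushable_task() can grab the highest-priority candidate
 * cheaply; highest_prio.next caches the best pushable priority for
 * the pull-side fast path.
 */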
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

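/*
 * Iterate over every rt_rq on @rq, one per task group, starting from
 * the global task_groups list; next_task_group() skips autogroup
 * entries.
 */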
#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);    \
                (iter = next_task_group(iter)) &&                       \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_add_rcu(&rt_rq->leaf_rt_rq_list,
                        &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}
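
/*
 * Worked example (a sketch, assuming the default 1s rt_period and
 * 0.95s rt_runtime on a 4-CPU root domain): a depleted rt_rq may take
 * up to (iter->rt_runtime - iter->rt_time) / 4 from each neighbour,
 * stopping as soon as its own rt_runtime reaches the full 1s period.
 */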

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have; that's the amount of runtime
                 * we lent and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void disable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void enable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                disable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                enable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        default:
                return NOTIFY_DONE;
        }
}

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (!sched_feat(RT_RUNTIME_SHARE))
                return more;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled.  If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway.  Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * Force a clock update if the CPU was idle,
                                 * lest wakeup -> unthrottle time accumulate.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

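/*
 * Charge rt_time against the runtime budget. If the budget for the
 * current period is exhausted (after trying to borrow more via
 * balance_runtime()), throttle the rt_rq and dequeue it so that
 * lower-priority scheduling classes can run; the period timer will
 * replenish and unthrottle it later.
 */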
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                printk_sched("sched: RT throttling activated\n");
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (curr->sched_class != &rt_sched_class)
                return;

        delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

        inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {

                WARN_ON(prio < prev_prio);

                /*
                 * This may have been our highest task, and therefore
                 * we may have some recomputation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }

        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;

        dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        int prio = rt_se_prio(rt_se);

        WARN_ON(!rt_prio(prio));
        rt_rq->rt_nr_running++;

        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
        inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;

        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
        dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        if (!rt_rq->rt_nr_running)
                list_add_leaf_rt_rq(rt_rq);

        if (head)
                list_add(&rt_se->run_list, queue);
        else
                list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
        if (!rt_rq->rt_nr_running)
                list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dequeue_pushable_task(rq, p);

        dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        cpu = task_cpu(p);

        if (p->nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues. If the woken
         * task is a higher priority, then it will stay on this CPU
         * and the lower prio task should be moved to another CPU.
         * Even though this will probably make the lower prio task
         * lose its cache, we do not want to bounce a higher task
         * around just because it gave up its CPU, perhaps for a
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
            (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        if (rq->curr->nr_cpus_allowed == 1)
                return;

        if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appear to be other CPUs that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu.  If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

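/*
 * Walk down the group hierarchy from the root rt_rq: at each level
 * pick the highest-priority entity; if it is a group, descend into
 * its runqueue until a task entity is reached.
 */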
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (!rt_rq->rt_nr_running)
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock_task;

        return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct task_struct *p = _pick_next_task_rt(rq);

        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
        /*
         * We detect this state here so that we can avoid taking the RQ
         * lock again later if there is no need to push
         */
        rq->post_schedule = has_pushable_tasks(rq);
#endif

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio <= idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p;

                        if (!rt_entity_is_task(rt_se))
                                continue;

                        p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

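/*
 * Per-CPU scratch cpumask for find_lowest_rq(), allocated during
 * scheduler init; a NULL mask below means we are being called before
 * that allocation has happened.
 */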
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpumask_test_cpu(cpu, lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;

                        /*
                         * "this_cpu" is cheaper to preempt than a
                         * remote processor.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        best_cpu = cpumask_first_and(lowest_mask,
                                                     sched_domain_span(sd));
                        if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(lowest_mask);
        if (cpu < nr_cpu_ids)
                return cpu;
        return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
1566                         /*
1567                          * We had to unlock the run queue. In
1568                          * the mean time, task could have
1569                          * migrated already or had its affinity changed.
1570                          * Also make sure that it wasn't scheduled on its rq.
1571                          */
1572                         if (unlikely(task_rq(task) != rq ||
1573                                      !cpumask_test_cpu(lowest_rq->cpu,
1574                                                        tsk_cpus_allowed(task)) ||
1575                                      task_running(rq, task) ||
1576                                      !task->on_rq)) {
1577
1578                                 double_unlock_balance(rq, lowest_rq);
1579                                 lowest_rq = NULL;
1580                                 break;
1581                         }
1582                 }
1583
1584                 /* If this rq is still suitable use it. */
1585                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1586                         break;
1587
1588                 /* try again */
1589                 double_unlock_balance(rq, lowest_rq);
1590                 lowest_rq = NULL;
1591         }
1592
1593         return lowest_rq;
1594 }
1595
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_tasks(rq))
                return NULL;

        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));

        return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

1640         /*
1641          * It's possible that the next_task slipped in of
1642          * higher priority than current. If that's the case
1643          * just reschedule current.
1644          */
1645         if (unlikely(next_task->prio < rq->curr->prio)) {
1646                 resched_task(rq->curr);
1647                 return 0;
1648         }
1649
1650         /* We might release rq lock */
1651         get_task_struct(next_task);
1652
1653         /* find_lock_lowest_rq locks the rq if found */
1654         lowest_rq = find_lock_lowest_rq(next_task, rq);
1655         if (!lowest_rq) {
1656                 struct task_struct *task;
1657                 /*
1658                  * find_lock_lowest_rq releases rq->lock
1659                  * so it is possible that next_task has migrated.
1660                  *
1661                  * We need to make sure that the task is still on the same
1662                  * run-queue and is also still the next task eligible for
1663                  * pushing.
1664                  */
1665                 task = pick_next_pushable_task(rq);
1666                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1667                         /*
1668                          * The task hasn't migrated, and is still the next
1669                          * eligible task, but we failed to find a run-queue
1670                          * to push it to.  Do not retry in this case, since
1671                          * other cpus will pull from us when ready.
1672                          */
1673                         goto out;
1674                 }
1675
1676                 if (!task)
1677                         /* No more tasks, just exit */
1678                         goto out;
1679
1680                 /*
1681                  * Something has shifted, try again.
1682                  */
1683                 put_task_struct(next_task);
1684                 next_task = task;
1685                 goto retry;
1686         }
1687
1688         deactivate_task(rq, next_task, 0);
1689         set_task_cpu(next_task, lowest_rq->cpu);
1690         activate_task(lowest_rq, next_task, 0);
1691         ret = 1;
1692
1693         resched_task(lowest_rq->curr);
1694
1695         double_unlock_balance(rq, lowest_rq);
1696
1697 out:
1698         put_task_struct(next_task);
1699
1700         return ret;
1701 }
1702
1703 static void push_rt_tasks(struct rq *rq)
1704 {
1705         /* push_rt_task() returns true if it moved an RT task */
1706         while (push_rt_task(rq))
1707                 ;
1708 }
1709
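/*
 * Scan all CPUs flagged as RT-overloaded and pull over any RT task
 * that would preempt what this runqueue is about to run. The scan
 * covers every peer, in case an even higher-priority candidate exists
 * on a later one.
 */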
1710 static int pull_rt_task(struct rq *this_rq)
1711 {
1712         int this_cpu = this_rq->cpu, ret = 0, cpu;
1713         struct task_struct *p;
1714         struct rq *src_rq;
1715
1716         if (likely(!rt_overloaded(this_rq)))
1717                 return 0;
1718
1719         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1720                 if (this_cpu == cpu)
1721                         continue;
1722
1723                 src_rq = cpu_rq(cpu);
1724
1725                 /*
1726                  * Don't bother taking the src_rq->lock if the next highest
1727                  * task is known to be lower-priority than our current task.
1728                  * This may look racy, but if this value is about to go
1729                  * logically higher, the src_rq will push this task away.
1730          * And if it's going logically lower, we do not care.
1731                  */
1732                 if (src_rq->rt.highest_prio.next >=
1733                     this_rq->rt.highest_prio.curr)
1734                         continue;
1735
1736                 /*
1737                  * We can potentially drop this_rq's lock in
1738                  * double_lock_balance, and another CPU could
1739          * alter this_rq.
1740                  */
1741                 double_lock_balance(this_rq, src_rq);
1742
1743                 /*
1744                  * Are there still pullable RT tasks?
1745                  */
1746                 if (src_rq->rt.rt_nr_running <= 1)
1747                         goto skip;
1748
1749                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1750
1751                 /*
1752                  * Do we have an RT task that preempts
1753                  * the to-be-scheduled task?
1754                  */
1755                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1756                         WARN_ON(p == src_rq->curr);
1757                         WARN_ON(!p->on_rq);
1758
1759                         /*
1760                          * There's a chance that p is higher in priority
1761                          * than what's currently running on its cpu.
1762          * This happens when p is waking up and hasn't
1763          * had a chance to schedule yet. We only pull
1764          * p if it is lower in priority than the
1765          * current task on its run queue.
1766                          */
1767                         if (p->prio < src_rq->curr->prio)
1768                                 goto skip;
1769
1770                         ret = 1;
1771
1772                         deactivate_task(src_rq, p, 0);
1773                         set_task_cpu(p, this_cpu);
1774                         activate_task(this_rq, p, 0);
1775                         /*
1776                          * We continue with the search, just in
1777                          * case there's an even higher prio task
1778                          * in another runqueue. (low likelihood
1779                          * but possible)
1780                          */
1781                 }
1782 skip:
1783                 double_unlock_balance(this_rq, src_rq);
1784         }
1785
1786         return ret;
1787 }
1788
1789 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1790 {
1791         /* Try to pull RT tasks here if we lower this rq's prio */
1792         if (rq->rt.highest_prio.curr > prev->prio)
1793                 pull_rt_task(rq);
1794 }
1795
1796 static void post_schedule_rt(struct rq *rq)
1797 {
1798         push_rt_tasks(rq);
1799 }
1800
1801 /*
1802  * If the woken task is not running and no reschedule is already pending,
1803  * try to push tasks away from this CPU now.
1804  */
1805 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1806 {
1807         if (!task_running(rq, p) &&
1808             !test_tsk_need_resched(rq->curr) &&
1809             has_pushable_tasks(rq) &&
1810             p->nr_cpus_allowed > 1 &&
1811             rt_task(rq->curr) &&
1812             (rq->curr->nr_cpus_allowed < 2 ||
1813              rq->curr->prio <= p->prio))
1814                 push_rt_tasks(rq);
1815 }
1816
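/*
 * Affinity-change hook: keep rt_nr_migratory and the pushable list
 * consistent when a task crosses the boundary between being pinned to
 * a single CPU and being allowed to run on several.
 */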
1817 static void set_cpus_allowed_rt(struct task_struct *p,
1818                                 const struct cpumask *new_mask)
1819 {
1820         struct rq *rq;
1821         int weight;
1822
1823         BUG_ON(!rt_task(p));
1824
1825         if (!p->on_rq)
1826                 return;
1827
1828         weight = cpumask_weight(new_mask);
1829
1830         /*
1831          * Only update if the task's migratability actually changes, i.e. it
1832          * crosses the boundary between one allowed CPU and several.
1833          */
1834         if ((p->nr_cpus_allowed > 1) == (weight > 1))
1835                 return;
1836
1837         rq = task_rq(p);
1838
1839         /*
1840          * The task either just lost or just gained the ability to migrate.
1841          */
1842         if (weight <= 1) {
1843                 if (!task_current(rq, p))
1844                         dequeue_pushable_task(rq, p);
1845                 BUG_ON(!rq->rt.rt_nr_migratory);
1846                 rq->rt.rt_nr_migratory--;
1847         } else {
1848                 if (!task_current(rq, p))
1849                         enqueue_pushable_task(rq, p);
1850                 rq->rt.rt_nr_migratory++;
1851         }
1852
1853         update_rt_migration(&rq->rt);
1854 }
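
/*
 * Illustrative only, not part of this file: the transition handled
 * above is what userspace triggers when a task's affinity mask crosses
 * the one-CPU boundary, for instance via sched_setaffinity(2). A
 * minimal sketch (assumes CPU 0 exists):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	static int pin_to_cpu0(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);	// weight == 1: no longer migratory
 *		return sched_setaffinity(0, sizeof(set), &set);
 *	}
 */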
1855
1856 /* Assumes rq->lock is held */
1857 static void rq_online_rt(struct rq *rq)
1858 {
1859         if (rq->rt.overloaded)
1860                 rt_set_overload(rq);
1861
1862         __enable_runtime(rq);
1863
1864         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1865 }
1866
1867 /* Assumes rq->lock is held */
1868 static void rq_offline_rt(struct rq *rq)
1869 {
1870         if (rq->rt.overloaded)
1871                 rt_clear_overload(rq);
1872
1873         __disable_runtime(rq);
1874
1875         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1876 }
1877
1878 /*
1879  * When switching away from the RT class, we may end up in a position
1880  * where we want to pull RT tasks from other runqueues.
1881  */
1882 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1883 {
1884         /*
1885          * If other RT tasks remain queued, we will reschedule
1886          * and their scheduling will take care of the balancing.
1887          * But if the departing task was the last RT task on
1888          * this runqueue, we may need to pull RT tasks from
1889          * other runqueues now.
1890          */
1891         if (!p->on_rq || rq->rt.rt_nr_running)
1892                 return;
1893
1894         if (pull_rt_task(rq))
1895                 resched_task(rq->curr);
1896 }
1897
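/*
 * Allocate the per-cpu scratch cpumask consulted when searching for a
 * lowest-priority target CPU (see find_lowest_rq()).
 */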
1898 void init_sched_rt_class(void)
1899 {
1900         unsigned int i;
1901
1902         for_each_possible_cpu(i) {
1903                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1904                                         GFP_KERNEL, cpu_to_node(i));
1905         }
1906 }
1907 #endif /* CONFIG_SMP */
1908
1909 /*
1910  * When switching a task to RT, we may overload the runqueue
1911  * with RT tasks. In this case we try to push them off to
1912  * other runqueues.
1913  */
1914 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1915 {
1916         int check_resched = 1;
1917
1918         /*
1919          * If p is already running, there is nothing that
1920          * needs to be done. But if p is not running, it may
1921          * need to preempt the currently running task. If that
1922          * task is also an RT task, see whether p can instead
1923          * be pushed to another run queue.
1924          */
1925         if (p->on_rq && rq->curr != p) {
1926 #ifdef CONFIG_SMP
1927                 if (rq->rt.overloaded && push_rt_task(rq) &&
1928                     /* Don't resched if we changed runqueues */
1929                     rq != task_rq(p))
1930                         check_resched = 0;
1931 #endif /* CONFIG_SMP */
1932                 if (check_resched && p->prio < rq->curr->prio)
1933                         resched_task(rq->curr);
1934         }
1935 }
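
/*
 * Illustrative only, not part of this file: switched_to_rt() fires
 * when a task enters the RT class, for example after a policy change
 * like the sketch below (needs CAP_SYS_NICE or a permissive
 * RLIMIT_RTPRIO):
 *
 *	#include <pthread.h>
 *	#include <sched.h>
 *
 *	static int make_fifo(pthread_t t, int prio)
 *	{
 *		struct sched_param sp = { .sched_priority = prio };
 *
 *		return pthread_setschedparam(t, SCHED_FIFO, &sp);
 *	}
 */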
1936
1937 /*
1938  * Priority of the task has changed. This may cause
1939  * us to initiate a push or pull.
1940  */
1941 static void
1942 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1943 {
1944         if (!p->on_rq)
1945                 return;
1946
1947         if (rq->curr == p) {
1948 #ifdef CONFIG_SMP
1949                 /*
1950                  * If our priority decreases while running, we
1951                  * may need to pull tasks to this runqueue.
1952                  */
1953                 if (oldprio < p->prio)
1954                         pull_rt_task(rq);
1955                 /*
1956                  * If there's a higher priority task waiting to run
1957                  * then reschedule. Note, the above pull_rt_task
1958                  * can release the rq lock and p could migrate.
1959                  * Only reschedule if p is still on the same runqueue.
1960                  */
1961                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1962                         resched_task(p);
1963 #else
1964                 /* For UP simply resched on drop of prio */
1965                 if (oldprio < p->prio)
1966                         resched_task(p);
1967 #endif /* CONFIG_SMP */
1968         } else {
1969                 /*
1970                  * This task is not running, but if its priority
1971                  * is higher than that of the currently running
1972                  * task, reschedule.
1973                  */
1974                 if (p->prio < rq->curr->prio)
1975                         resched_task(rq->curr);
1976         }
1977 }
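
/*
 * Illustrative only, not part of this file: prio_changed_rt() runs on
 * any effective priority change, whether from rt_mutex priority
 * inheritance or from an explicit request such as:
 *
 *	#include <sched.h>
 *
 *	struct sched_param sp = { .sched_priority = 5 };
 *
 *	sched_setparam(0, &sp);		// pid 0 means the calling process
 */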
1978
1979 static void watchdog(struct rq *rq, struct task_struct *p)
1980 {
1981         unsigned long soft, hard;
1982
1983         /* rlim_max may change after rlim_cur is read; fixed up on the next tick */
1984         soft = task_rlimit(p, RLIMIT_RTTIME);
1985         hard = task_rlimit_max(p, RLIMIT_RTTIME);
1986
1987         if (soft != RLIM_INFINITY) {
1988                 unsigned long next;
1989
1990                 if (p->rt.watchdog_stamp != jiffies) {
1991                         p->rt.timeout++;
1992                         p->rt.watchdog_stamp = jiffies;
1993                 }
1994
1995                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1996                 if (p->rt.timeout > next)
1997                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1998         }
1999 }
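
/*
 * Illustrative only, not part of this file: the watchdog above
 * enforces RLIMIT_RTTIME, a cap (in microseconds) on the CPU time an
 * RT task may consume without blocking. Crossing the soft limit
 * raises SIGXCPU; hitting the hard limit is fatal. A minimal sketch:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = {
 *		.rlim_cur = 500000,	// soft: 500 ms of unblocked RT CPU time
 *		.rlim_max = 1000000,	// hard: 1 s
 *	};
 *
 *	setrlimit(RLIMIT_RTTIME, &rl);
 */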
2000
2001 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2002 {
2003         struct sched_rt_entity *rt_se = &p->rt;
2004
2005         update_curr_rt(rq);
2006
2007         watchdog(rq, p);
2008
2009         /*
2010          * RR tasks need a special form of timeslice management.
2011          * FIFO tasks have no timeslices.
2012          */
2013         if (p->policy != SCHED_RR)
2014                 return;
2015
2016         if (--p->rt.time_slice)
2017                 return;
2018
2019         p->rt.time_slice = RR_TIMESLICE;
2020
2021         /*
2022          * Requeue to the end of the queue if we (or any of our ancestors) are
2023          * not the only element on the queue.
2024          */
2025         for_each_sched_rt_entity(rt_se) {
2026                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2027                         requeue_task_rt(rq, p, 0);
2028                         set_tsk_need_resched(p);
2029                         return;
2030                 }
2031         }
2032 }
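
/*
 * Illustrative only, not part of this file: the round-robin slicing
 * above applies once a task runs under SCHED_RR, e.g. after:
 *
 *	#include <sched.h>
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(0, SCHED_RR, &sp);	// pid 0 means the caller
 */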
2033
2034 static void set_curr_task_rt(struct rq *rq)
2035 {
2036         struct task_struct *p = rq->curr;
2037
2038         p->se.exec_start = rq->clock_task;
2039
2040         /* The running task is never eligible for pushing */
2041         dequeue_pushable_task(rq, p);
2042 }
2043
2044 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2045 {
2046         /*
2047          * Time slice is 0 for SCHED_FIFO tasks
2048          */
2049         if (task->policy == SCHED_RR)
2050                 return RR_TIMESLICE;
2051         else
2052                 return 0;
2053 }
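
/*
 * Illustrative only, not part of this file: this hook backs the
 * sched_rr_get_interval(2) system call, which reports the RR timeslice
 * (and zero for SCHED_FIFO tasks):
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */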
2054
2055 const struct sched_class rt_sched_class = {
2056         .next                   = &fair_sched_class,
2057         .enqueue_task           = enqueue_task_rt,
2058         .dequeue_task           = dequeue_task_rt,
2059         .yield_task             = yield_task_rt,
2060
2061         .check_preempt_curr     = check_preempt_curr_rt,
2062
2063         .pick_next_task         = pick_next_task_rt,
2064         .put_prev_task          = put_prev_task_rt,
2065
2066 #ifdef CONFIG_SMP
2067         .select_task_rq         = select_task_rq_rt,
2068
2069         .set_cpus_allowed       = set_cpus_allowed_rt,
2070         .rq_online              = rq_online_rt,
2071         .rq_offline             = rq_offline_rt,
2072         .pre_schedule           = pre_schedule_rt,
2073         .post_schedule          = post_schedule_rt,
2074         .task_woken             = task_woken_rt,
2075         .switched_from          = switched_from_rt,
2076 #endif
2077
2078         .set_curr_task          = set_curr_task_rt,
2079         .task_tick              = task_tick_rt,
2080
2081         .get_rr_interval        = get_rr_interval_rt,
2082
2083         .prio_changed           = prio_changed_rt,
2084         .switched_to            = switched_to_rt,
2085 };
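
/*
 * For orientation, a simplified sketch (assumed, not the exact core
 * scheduler code) of how this table is consumed: the classes form a
 * priority-ordered list through .next, and picking the next task walks
 * that list, so RT tasks are considered before fair-class ones:
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */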
2086
2087 #ifdef CONFIG_SCHED_DEBUG
2088 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2089
2090 void print_rt_stats(struct seq_file *m, int cpu)
2091 {
2092         rt_rq_iter_t iter;
2093         struct rt_rq *rt_rq;
2094
2095         rcu_read_lock();
2096         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2097                 print_rt_rq(m, cpu, rt_rq);
2098         rcu_read_unlock();
2099 }
2100 #endif /* CONFIG_SCHED_DEBUG */