firefly-linux-kernel-4.4.55.git / kernel/sched/stop_task.c
#include "sched.h"
#include "walt.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

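/*
 * Stop tasks are per-CPU kthreads pinned to their own CPU, so placement
 * is trivial: a stop task always runs where it already is.
 */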
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

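/*
 * Return the per-CPU stop task if one is queued on this runqueue;
 * returning NULL lets the core pick from the next (deadline) class.
 */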
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *stop = rq->stop;

	if (!stop || !task_on_rq_queued(stop))
		return NULL;

	put_prev_task(rq, prev);

	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}

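/*
 * At most one stop task exists per CPU, so enqueue/dequeue only need to
 * maintain the nr_running count plus this vendor kernel's WALT (Window
 * Assisted Load Tracking) cumulative runnable average.
 */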
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
	walt_inc_cumulative_runnable_avg(rq, p);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
	walt_dec_cumulative_runnable_avg(rq, p);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

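/*
 * Account the runtime consumed while the stop task was on the CPU:
 * clamp a negative clock delta, record the max-exec schedstat, and
 * charge the time to the task, its thread group, and cpuacct.
 */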
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
			max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}

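/*
 * The stop task has no timeslice, so there is nothing to do on a tick.
 */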
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

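/*
 * A task just became the running stop task; restart its exec_start
 * timestamp so put_prev_task_stop() accounts runtime from this point.
 */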
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

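/*
 * The stop task is not round-robin scheduled; report no RR interval.
 */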
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

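/*
 * Runtime accounting happens in put_prev_task_stop(), so there is
 * nothing to update here.
 */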
static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};