mmc: dw_mmc: fix the max_blk_count in IDMAC
[firefly-linux-kernel-4.4.55.git] / drivers / gator / gator_trace_sched.c
1 /**
2  * Copyright (C) ARM Limited 2010-2015. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  */
9
10 #include <trace/events/sched.h>
11 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
12 #include <trace/events/task.h>
13 #endif
14
15 #include "gator.h"
16
17 #define TASK_MAP_ENTRIES                1024    /* must be power of 2 */
18 #define TASK_MAX_COLLISIONS             2
19
20 enum {
21         STATE_WAIT_ON_OTHER = 0,
22         STATE_CONTENTION,
23         STATE_WAIT_ON_IO,
24         CPU_WAIT_TOTAL
25 };
26
27 static DEFINE_PER_CPU(uint64_t *, taskname_keys);
28 static DEFINE_PER_CPU(int, collecting);
29
30 /* this array is never read as the cpu wait charts are derived
31  * counters the files are needed, nonetheless, to show that these
32  * counters are available
33  */
34 static ulong cpu_wait_enabled[CPU_WAIT_TOTAL];
35 static ulong sched_cpu_key[CPU_WAIT_TOTAL];
36
37 static int sched_trace_create_files(struct super_block *sb, struct dentry *root)
38 {
39         struct dentry *dir;
40
41         /* CPU Wait - Contention */
42         dir = gatorfs_mkdir(sb, root, "Linux_cpu_wait_contention");
43         if (!dir)
44                 return -1;
45         gatorfs_create_ulong(sb, dir, "enabled", &cpu_wait_enabled[STATE_CONTENTION]);
46         gatorfs_create_ro_ulong(sb, dir, "key", &sched_cpu_key[STATE_CONTENTION]);
47
48         /* CPU Wait - I/O */
49         dir = gatorfs_mkdir(sb, root, "Linux_cpu_wait_io");
50         if (!dir)
51                 return -1;
52         gatorfs_create_ulong(sb, dir, "enabled", &cpu_wait_enabled[STATE_WAIT_ON_IO]);
53         gatorfs_create_ro_ulong(sb, dir, "key", &sched_cpu_key[STATE_WAIT_ON_IO]);
54
55         return 0;
56 }
57
58 static void emit_pid_name(const char *comm, struct task_struct *task)
59 {
60         bool found = false;
61         char taskcomm[TASK_COMM_LEN + 3];
62         unsigned long x, cpu = get_physical_cpu();
63         uint64_t *keys = &(per_cpu(taskname_keys, cpu)[(task->pid & 0xFF) * TASK_MAX_COLLISIONS]);
64         uint64_t value;
65
66         value = gator_chksum_crc32(comm);
67         value = (value << 32) | (uint32_t)task->pid;
68
69         /* determine if the thread name was emitted already */
70         for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
71                 if (keys[x] == value) {
72                         found = true;
73                         break;
74                 }
75         }
76
77         if (!found) {
78                 /* shift values, new value always in front */
79                 uint64_t oldv, newv = value;
80
81                 for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
82                         oldv = keys[x];
83                         keys[x] = newv;
84                         newv = oldv;
85                 }
86
87                 /* emit pid names, cannot use get_task_comm, as it's not exported on all kernel versions */
88                 if (strlcpy(taskcomm, comm, TASK_COMM_LEN) == TASK_COMM_LEN - 1) {
89                         /* append ellipses if comm has length of TASK_COMM_LEN - 1 */
90                         strcat(taskcomm, "...");
91                 }
92
93                 marshal_thread_name(task->pid, taskcomm);
94         }
95 }
96
/* Sample every registered gator event source for the current CPU.
 * @time: gator timestamp for this sample
 * @task: task passed to per-process readers; may be NULL to skip them
 * @sched_switch: forwarded to read() callbacks so sources know the context
 * Runs in sched_switch context, so it must not sleep (hence spin_trylock).
 */
static void collect_counters(u64 time, struct task_struct *task, bool sched_switch)
{
	int *buffer, len, cpu = get_physical_cpu();
	long long *buffer64;
	struct gator_interface *gi;

	if (marshal_event_header(time)) {
		list_for_each_entry(gi, &gator_events, list) {
			/* a source provides either 32-bit or 64-bit samples */
			if (gi->read) {
				len = gi->read(&buffer, sched_switch);
				marshal_event(len, buffer);
			} else if (gi->read64) {
				len = gi->read64(&buffer64);
				marshal_event64(len, buffer64);
			}
			/* per-process readers additionally need a task context */
			if (gi->read_proc && task != NULL) {
				len = gi->read_proc(&buffer64, task);
				marshal_event64(len, buffer64);
			}
		}
		/* perf time is emitted from CPU 0 only */
		if (cpu == 0)
			gator_emit_perf_time(time);
		/* Only check after writing all counters so that time and corresponding counters appear in the same frame */
		buffer_check(cpu, BLOCK_COUNTER_BUF, time);

		/* Commit buffers on timeout */
		if (gator_live_rate > 0 && time >= per_cpu(gator_buffer_commit_time, cpu)) {
			static const int buftypes[] = { NAME_BUF, COUNTER_BUF, BLOCK_COUNTER_BUF, SCHED_TRACE_BUF, ACTIVITY_BUF };
			int i;

			for (i = 0; i < ARRAY_SIZE(buftypes); ++i)
				gator_commit_buffer(cpu, buftypes[i], time);

			/* spinlocks are noops on uniprocessor machines and mutexes do
			 * not work in sched_switch context in RT-Preempt full, so
			 * disable proactive flushing of the annotate frame on
			 * uniprocessor machines.
			 */
#ifdef CONFIG_SMP
			/* Try to preemptively flush the annotate buffer to reduce the chance of the buffer being full */
			if (on_primary_core() && spin_trylock(&annotate_lock)) {
				gator_commit_buffer(0, ANNOTATE_BUF, time);
				spin_unlock(&annotate_lock);
			}
#endif
		}
	}
}
145
/* special case used during a suspend of the system:
 * report a switch to pid 0 (idle) with STATE_WAIT_ON_OTHER so the
 * capture shows the CPU as idle while suspended
 */
static void trace_sched_insert_idle(void)
{
	marshal_sched_trace_switch(0, 0);
}
151
152 static void gator_trace_emit_link(struct task_struct *p)
153 {
154         int cookie;
155         int cpu = get_physical_cpu();
156
157         cookie = get_exec_cookie(cpu, p);
158         emit_pid_name(p->comm, p);
159
160         marshal_link(cookie, p->tgid, p->pid);
161 }
162
/* fork tracepoint: announce the new child task to the host */
GATOR_DEFINE_PROBE(sched_process_fork, TP_PROTO(struct task_struct *parent, struct task_struct *child))
{
	gator_trace_emit_link(child);
}
167
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
/* exec tracepoint (3.4+): re-announce the task, as exec changes its image */
GATOR_DEFINE_PROBE(sched_process_exec, TP_PROTO(struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm))
{
	gator_trace_emit_link(p);
}

/* the comm parameter lost its constness in 3.15 upstream; match either */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
GATOR_DEFINE_PROBE(task_rename, TP_PROTO(struct task_struct *task, char *comm))
#else
GATOR_DEFINE_PROBE(task_rename, TP_PROTO(struct task_struct *task, const char *comm))
#endif
{
	emit_pid_name(comm, task);
}
#endif
183
/* context-switch tracepoint: classify why @prev left the CPU, sample all
 * counters, and emit the switch record for @next
 * (pre-2.6.35 kernels pass an extra rq argument)
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct rq *rq, struct task_struct *prev, struct task_struct *next))
#else
GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next))
#endif
{
	int state;
	int cpu = get_physical_cpu();

	/* flag lets other gator paths know we are inside the scheduler probe */
	per_cpu(in_scheduler_context, cpu) = true;

	/* do as much work as possible before disabling interrupts */
	if (prev->state == TASK_RUNNING)
		/* prev was still runnable when displaced: CPU contention */
		state = STATE_CONTENTION;
	else if (prev->in_iowait)
		/* prev blocked waiting on I/O */
		state = STATE_WAIT_ON_IO;
	else
		state = STATE_WAIT_ON_OTHER;

	per_cpu(collecting, cpu) = 1;
	collect_counters(gator_get_time(), prev, true);
	per_cpu(collecting, cpu) = 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
	/* no exec/rename tracepoints before 3.4, so emit names on switch */
	gator_trace_emit_link(next);
#endif
	marshal_sched_trace_switch(next->pid, state);

	per_cpu(in_scheduler_context, cpu) = false;
}
214
/* task-free tracepoint: tell the host this tgid/pid is gone */
GATOR_DEFINE_PROBE(sched_process_free, TP_PROTO(struct task_struct *p))
{
	marshal_sched_trace_exit(p->tgid, p->pid);
}
219
/* No-op IPI callback: running it on a CPU is enough to force a context
 * switch there (see register_scheduler_tracepoints)
 */
static void do_nothing(void *info)
{
	/* Intentionally do nothing */
	(void)info;
}
225
/* Register all scheduler-related tracepoints.
 * Returns 0 on success, -1 on failure; on failure every tracepoint that
 * was registered before the failing one is unregistered again via the
 * goto unwind ladder below (order is the reverse of registration).
 */
static int register_scheduler_tracepoints(void)
{
	/* register tracepoints */
	if (GATOR_REGISTER_TRACE(sched_process_fork))
		goto fail_sched_process_fork;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
	if (GATOR_REGISTER_TRACE(sched_process_exec))
		goto fail_sched_process_exec;
	if (GATOR_REGISTER_TRACE(task_rename))
		goto fail_task_rename;
#endif
	if (GATOR_REGISTER_TRACE(sched_switch))
		goto fail_sched_switch;
	if (GATOR_REGISTER_TRACE(sched_process_free))
		goto fail_sched_process_free;
	pr_debug("gator: registered tracepoints\n");

	/* Now that the scheduler tracepoint is registered, force a context
	 * switch on all cpus to capture what is currently running.
	 */
	on_each_cpu(do_nothing, NULL, 0);

	return 0;

	/* unregister tracepoints on error */
fail_sched_process_free:
	GATOR_UNREGISTER_TRACE(sched_switch);
fail_sched_switch:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
	GATOR_UNREGISTER_TRACE(task_rename);
fail_task_rename:
	GATOR_UNREGISTER_TRACE(sched_process_exec);
fail_sched_process_exec:
#endif
	GATOR_UNREGISTER_TRACE(sched_process_fork);
fail_sched_process_fork:
	pr_err("gator: tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");

	return -1;
}
266
/* Unregister every tracepoint registered by register_scheduler_tracepoints */
static void unregister_scheduler_tracepoints(void)
{
	GATOR_UNREGISTER_TRACE(sched_process_fork);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
	GATOR_UNREGISTER_TRACE(sched_process_exec);
	GATOR_UNREGISTER_TRACE(task_rename);
#endif
	GATOR_UNREGISTER_TRACE(sched_switch);
	GATOR_UNREGISTER_TRACE(sched_process_free);
	pr_debug("gator: unregistered tracepoints\n");
}
278
279 static void gator_trace_sched_stop(void)
280 {
281         int cpu;
282
283         unregister_scheduler_tracepoints();
284
285         for_each_present_cpu(cpu) {
286                 kfree(per_cpu(taskname_keys, cpu));
287         }
288 }
289
290 static int gator_trace_sched_start(void)
291 {
292         int cpu, size;
293         int ret;
294
295         for_each_present_cpu(cpu) {
296                 size = TASK_MAP_ENTRIES * TASK_MAX_COLLISIONS * sizeof(uint64_t);
297                 per_cpu(taskname_keys, cpu) = kmalloc(size, GFP_KERNEL);
298                 if (!per_cpu(taskname_keys, cpu))
299                         return -1;
300                 memset(per_cpu(taskname_keys, cpu), 0, size);
301         }
302
303         ret = register_scheduler_tracepoints();
304
305         return ret;
306 }
307
/* CPU-offline hook: record a switch to idle for the departing CPU */
static void gator_trace_sched_offline(void)
{
	trace_sched_insert_idle();
}
312
313 static void gator_trace_sched_init(void)
314 {
315         int i;
316
317         for (i = 0; i < CPU_WAIT_TOTAL; i++) {
318                 cpu_wait_enabled[i] = 0;
319                 sched_cpu_key[i] = gator_events_get_key();
320         }
321 }