1 #include "builtin.h"
2 #include "perf.h"
3
4 #include "util/util.h"
5 #include "util/evlist.h"
6 #include "util/cache.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13
14 #include "util/parse-options.h"
15 #include "util/trace-event.h"
16
17 #include "util/debug.h"
18
19 #include <sys/prctl.h>
20 #include <sys/resource.h>
21
22 #include <semaphore.h>
23 #include <pthread.h>
24 #include <math.h>
25
26 static const char               *input_name;
27
28 static char                     default_sort_order[] = "avg, max, switch, runtime";
29 static const char               *sort_order = default_sort_order;
30
31 static int                      profile_cpu = -1;
32
33 #define PR_SET_NAME             15               /* Set process name */
34 #define MAX_CPUS                4096
35
36 static u64                      run_measurement_overhead;
37 static u64                      sleep_measurement_overhead;
38
39 #define COMM_LEN                20
40 #define SYM_LEN                 129
41
42 #define MAX_PID                 65536
43
44 static unsigned long            nr_tasks;
45
46 struct sched_atom;
47
48 struct task_desc {
49         unsigned long           nr;
50         unsigned long           pid;
51         char                    comm[COMM_LEN];
52
53         unsigned long           nr_events;
54         unsigned long           curr_event;
55         struct sched_atom       **atoms;
56
57         pthread_t               thread;
58         sem_t                   sleep_sem;
59
60         sem_t                   ready_for_work;
61         sem_t                   work_done_sem;
62
63         u64                     cpu_usage;
64 };
65
66 enum sched_event_type {
67         SCHED_EVENT_RUN,
68         SCHED_EVENT_SLEEP,
69         SCHED_EVENT_WAKEUP,
70         SCHED_EVENT_MIGRATION,
71 };
72
73 struct sched_atom {
74         enum sched_event_type   type;
75         int                     specific_wait;
76         u64                     timestamp;
77         u64                     duration;
78         unsigned long           nr;
79         sem_t                   *wait_sem;
80         struct task_desc        *wakee;
81 };
82
83 static struct task_desc         *pid_to_task[MAX_PID];
84
85 static struct task_desc         **tasks;
86
87 static pthread_mutex_t          start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
88 static u64                      start_time;
89
90 static pthread_mutex_t          work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
91
92 static unsigned long            nr_run_events;
93 static unsigned long            nr_sleep_events;
94 static unsigned long            nr_wakeup_events;
95
96 static unsigned long            nr_sleep_corrections;
97 static unsigned long            nr_run_events_optimized;
98
99 static unsigned long            targetless_wakeups;
100 static unsigned long            multitarget_wakeups;
101
102 static u64                      cpu_usage;
103 static u64                      runavg_cpu_usage;
104 static u64                      parent_cpu_usage;
105 static u64                      runavg_parent_cpu_usage;
106
107 static unsigned long            nr_runs;
108 static u64                      sum_runtime;
109 static u64                      sum_fluct;
110 static u64                      run_avg;
111
112 static unsigned int             replay_repeat = 10;
113 static unsigned long            nr_timestamps;
114 static unsigned long            nr_unordered_timestamps;
115 static unsigned long            nr_state_machine_bugs;
116 static unsigned long            nr_context_switch_bugs;
117 static unsigned long            nr_events;
118 static unsigned long            nr_lost_chunks;
119 static unsigned long            nr_lost_events;
120
121 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
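/* sched_out_state() indexes prev_state directly into this string. */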
122
123 enum thread_state {
124         THREAD_SLEEPING = 0,
125         THREAD_WAIT_CPU,
126         THREAD_SCHED_IN,
127         THREAD_IGNORE
128 };
129
130 struct work_atom {
131         struct list_head        list;
132         enum thread_state       state;
133         u64                     sched_out_time;
134         u64                     wake_up_time;
135         u64                     sched_in_time;
136         u64                     runtime;
137 };
138
139 struct work_atoms {
140         struct list_head        work_list;
141         struct thread           *thread;
142         struct rb_node          node;
143         u64                     max_lat;
144         u64                     max_lat_at;
145         u64                     total_lat;
146         u64                     nb_atoms;
147         u64                     total_runtime;
148 };
149
150 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
151
152 static struct rb_root           atom_root, sorted_atom_root;
153
154 static u64                      all_runtime;
155 static u64                      all_count;
156
157
158 static u64 get_nsecs(void)
159 {
160         struct timespec ts;
161
162         clock_gettime(CLOCK_MONOTONIC, &ts);
163
164         return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
165 }
166
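/*
 * Spin until roughly 'nsecs' nanoseconds have elapsed; the calibrated
 * run_measurement_overhead is credited against the target, so the loop
 * ends early by the fixed cost of one burn_nsecs() call.
 */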
167 static void burn_nsecs(u64 nsecs)
168 {
169         u64 T0 = get_nsecs(), T1;
170
171         do {
172                 T1 = get_nsecs();
173         } while (T1 + run_measurement_overhead < T0 + nsecs);
174 }
175
176 static void sleep_nsecs(u64 nsecs)
177 {
178         struct timespec ts;
179
180         ts.tv_nsec = nsecs % 999999999;
181         ts.tv_sec = nsecs / 999999999;
182
183         nanosleep(&ts, NULL);
184 }
185
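/*
 * The two calibration routines below take the minimum of 10 trials as the
 * measurement overhead; the sleep variant subtracts the 10 usecs it
 * actually asked to sleep.
 */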
186 static void calibrate_run_measurement_overhead(void)
187 {
188         u64 T0, T1, delta, min_delta = 1000000000ULL;
189         int i;
190
191         for (i = 0; i < 10; i++) {
192                 T0 = get_nsecs();
193                 burn_nsecs(0);
194                 T1 = get_nsecs();
195                 delta = T1-T0;
196                 min_delta = min(min_delta, delta);
197         }
198         run_measurement_overhead = min_delta;
199
200         printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
201 }
202
203 static void calibrate_sleep_measurement_overhead(void)
204 {
205         u64 T0, T1, delta, min_delta = 1000000000ULL;
206         int i;
207
208         for (i = 0; i < 10; i++) {
209                 T0 = get_nsecs();
210                 sleep_nsecs(10000);
211                 T1 = get_nsecs();
212                 delta = T1-T0;
213                 min_delta = min(min_delta, delta);
214         }
215         min_delta -= 10000;
216         sleep_measurement_overhead = min_delta;
217
218         printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
219 }
220
221 static struct sched_atom *
222 get_new_event(struct task_desc *task, u64 timestamp)
223 {
224         struct sched_atom *event = zalloc(sizeof(*event));
225         unsigned long idx = task->nr_events;
226         size_t size;
227
228         event->timestamp = timestamp;
229         event->nr = idx;
230
231         task->nr_events++;
232         size = sizeof(struct sched_atom *) * task->nr_events;
233         task->atoms = realloc(task->atoms, size);
234         BUG_ON(!task->atoms);
235
236         task->atoms[idx] = event;
237
238         return event;
239 }
240
241 static struct sched_atom *last_event(struct task_desc *task)
242 {
243         if (!task->nr_events)
244                 return NULL;
245
246         return task->atoms[task->nr_events - 1];
247 }
248
249 static void
250 add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
251 {
252         struct sched_atom *event, *curr_event = last_event(task);
253
254         /*
255          * optimize an existing RUN event by merging this one
256          * into it:
257          */
258         if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
259                 nr_run_events_optimized++;
260                 curr_event->duration += duration;
261                 return;
262         }
263
264         event = get_new_event(task, timestamp);
265
266         event->type = SCHED_EVENT_RUN;
267         event->duration = duration;
268
269         nr_run_events++;
270 }
271
272 static void
273 add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
274                        struct task_desc *wakee)
275 {
276         struct sched_atom *event, *wakee_event;
277
278         event = get_new_event(task, timestamp);
279         event->type = SCHED_EVENT_WAKEUP;
280         event->wakee = wakee;
281
282         wakee_event = last_event(wakee);
283         if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
284                 targetless_wakeups++;
285                 return;
286         }
287         if (wakee_event->wait_sem) {
288                 multitarget_wakeups++;
289                 return;
290         }
291
292         wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
293         sem_init(wakee_event->wait_sem, 0, 0);
294         wakee_event->specific_wait = 1;
295         event->wait_sem = wakee_event->wait_sem;
296
297         nr_wakeup_events++;
298 }
299
300 static void
301 add_sched_event_sleep(struct task_desc *task, u64 timestamp,
302                       u64 task_state __used)
303 {
304         struct sched_atom *event = get_new_event(task, timestamp);
305
306         event->type = SCHED_EVENT_SLEEP;
307
308         nr_sleep_events++;
309 }
310
311 static struct task_desc *register_pid(unsigned long pid, const char *comm)
312 {
313         struct task_desc *task;
314
315         BUG_ON(pid >= MAX_PID);
316
317         task = pid_to_task[pid];
318
319         if (task)
320                 return task;
321
322         task = zalloc(sizeof(*task));
323         task->pid = pid;
324         task->nr = nr_tasks;
325         strcpy(task->comm, comm);
326         /*
327          * every task starts in sleeping state - this gets ignored
328          * if there's no wakeup pointing to this sleep state:
329          */
330         add_sched_event_sleep(task, 0, 0);
331
332         pid_to_task[pid] = task;
333         nr_tasks++;
334         tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
335         BUG_ON(!tasks);
336         tasks[task->nr] = task;
337
338         if (verbose)
339                 printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
340
341         return task;
342 }
343
344
345 static void print_task_traces(void)
346 {
347         struct task_desc *task;
348         unsigned long i;
349
350         for (i = 0; i < nr_tasks; i++) {
351                 task = tasks[i];
352                 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
353                         task->nr, task->comm, task->pid, task->nr_events);
354         }
355 }
356
357 static void add_cross_task_wakeups(void)
358 {
359         struct task_desc *task1, *task2;
360         unsigned long i, j;
361
362         for (i = 0; i < nr_tasks; i++) {
363                 task1 = tasks[i];
364                 j = i + 1;
365                 if (j == nr_tasks)
366                         j = 0;
367                 task2 = tasks[j];
368                 add_sched_event_wakeup(task1, 0, task2);
369         }
370 }
371
372 static void
373 process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
374 {
375         int ret = 0;
376
377         switch (atom->type) {
378                 case SCHED_EVENT_RUN:
379                         burn_nsecs(atom->duration);
380                         break;
381                 case SCHED_EVENT_SLEEP:
382                         if (atom->wait_sem)
383                                 ret = sem_wait(atom->wait_sem);
384                         BUG_ON(ret);
385                         break;
386                 case SCHED_EVENT_WAKEUP:
387                         if (atom->wait_sem)
388                                 ret = sem_post(atom->wait_sem);
389                         BUG_ON(ret);
390                         break;
391                 case SCHED_EVENT_MIGRATION:
392                         break;
393                 default:
394                         BUG_ON(1);
395         }
396 }
397
398 static u64 get_cpu_usage_nsec_parent(void)
399 {
400         struct rusage ru;
401         u64 sum;
402         int err;
403
404         err = getrusage(RUSAGE_SELF, &ru);
405         BUG_ON(err);
406
407         sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
408         sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
409
410         return sum;
411 }
412
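/*
 * Open a software task-clock counter for the calling thread (pid 0, any
 * CPU); reads return the thread's consumed CPU time in nanoseconds.
 */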
413 static int self_open_counters(void)
414 {
415         struct perf_event_attr attr;
416         int fd;
417
418         memset(&attr, 0, sizeof(attr));
419
420         attr.type = PERF_TYPE_SOFTWARE;
421         attr.config = PERF_COUNT_SW_TASK_CLOCK;
422
423         fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
424
425         if (fd < 0)
426                 die("Error: sys_perf_event_open() syscall returned"
427                     "with %d (%s)\n", fd, strerror(errno));
428         return fd;
429 }
430
431 static u64 get_cpu_usage_nsec_self(int fd)
432 {
433         u64 runtime;
434         int ret;
435
436         ret = read(fd, &runtime, sizeof(runtime));
437         BUG_ON(ret != sizeof(runtime));
438
439         return runtime;
440 }
441
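/*
 * Worker thread body: announce readiness, block on start_work_mutex (held
 * by the parent until wait_for_tasks() releases it), replay this task's
 * atoms while measuring its own CPU time, then post work_done_sem and park
 * on work_done_wait_mutex until the next run.
 */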
442 static void *thread_func(void *ctx)
443 {
444         struct task_desc *this_task = ctx;
445         u64 cpu_usage_0, cpu_usage_1;
446         unsigned long i, ret;
447         char comm2[22];
448         int fd;
449
450         sprintf(comm2, ":%s", this_task->comm);
451         prctl(PR_SET_NAME, comm2);
452         fd = self_open_counters();
453
454 again:
455         ret = sem_post(&this_task->ready_for_work);
456         BUG_ON(ret);
457         ret = pthread_mutex_lock(&start_work_mutex);
458         BUG_ON(ret);
459         ret = pthread_mutex_unlock(&start_work_mutex);
460         BUG_ON(ret);
461
462         cpu_usage_0 = get_cpu_usage_nsec_self(fd);
463
464         for (i = 0; i < this_task->nr_events; i++) {
465                 this_task->curr_event = i;
466                 process_sched_event(this_task, this_task->atoms[i]);
467         }
468
469         cpu_usage_1 = get_cpu_usage_nsec_self(fd);
470         this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
471         ret = sem_post(&this_task->work_done_sem);
472         BUG_ON(ret);
473
474         ret = pthread_mutex_lock(&work_done_wait_mutex);
475         BUG_ON(ret);
476         ret = pthread_mutex_unlock(&work_done_wait_mutex);
477         BUG_ON(ret);
478
479         goto again;
480 }
481
482 static void create_tasks(void)
483 {
484         struct task_desc *task;
485         pthread_attr_t attr;
486         unsigned long i;
487         int err;
488
489         err = pthread_attr_init(&attr);
490         BUG_ON(err);
491         err = pthread_attr_setstacksize(&attr,
492                         (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
493         BUG_ON(err);
494         err = pthread_mutex_lock(&start_work_mutex);
495         BUG_ON(err);
496         err = pthread_mutex_lock(&work_done_wait_mutex);
497         BUG_ON(err);
498         for (i = 0; i < nr_tasks; i++) {
499                 task = tasks[i];
500                 sem_init(&task->sleep_sem, 0, 0);
501                 sem_init(&task->ready_for_work, 0, 0);
502                 sem_init(&task->work_done_sem, 0, 0);
503                 task->curr_event = 0;
504                 err = pthread_create(&task->thread, &attr, thread_func, task);
505                 BUG_ON(err);
506         }
507 }
508
509 static void wait_for_tasks(void)
510 {
511         u64 cpu_usage_0, cpu_usage_1;
512         struct task_desc *task;
513         unsigned long i, ret;
514
515         start_time = get_nsecs();
516         cpu_usage = 0;
517         pthread_mutex_unlock(&work_done_wait_mutex);
518
519         for (i = 0; i < nr_tasks; i++) {
520                 task = tasks[i];
521                 ret = sem_wait(&task->ready_for_work);
522                 BUG_ON(ret);
523                 sem_init(&task->ready_for_work, 0, 0);
524         }
525         ret = pthread_mutex_lock(&work_done_wait_mutex);
526         BUG_ON(ret);
527
528         cpu_usage_0 = get_cpu_usage_nsec_parent();
529
530         pthread_mutex_unlock(&start_work_mutex);
531
532         for (i = 0; i < nr_tasks; i++) {
533                 task = tasks[i];
534                 ret = sem_wait(&task->work_done_sem);
535                 BUG_ON(ret);
536                 sem_init(&task->work_done_sem, 0, 0);
537                 cpu_usage += task->cpu_usage;
538                 task->cpu_usage = 0;
539         }
540
541         cpu_usage_1 = get_cpu_usage_nsec_parent();
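        /*
         * Keep 90/10 running averages of task and parent CPU usage across
         * repeated runs (a simple exponential moving average).
         */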
542         if (!runavg_cpu_usage)
543                 runavg_cpu_usage = cpu_usage;
544         runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
545
546         parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
547         if (!runavg_parent_cpu_usage)
548                 runavg_parent_cpu_usage = parent_cpu_usage;
549         runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
550                                    parent_cpu_usage)/10;
551
552         ret = pthread_mutex_lock(&start_work_mutex);
553         BUG_ON(ret);
554
555         for (i = 0; i < nr_tasks; i++) {
556                 task = tasks[i];
557                 sem_init(&task->sleep_sem, 0, 0);
558                 task->curr_event = 0;
559         }
560 }
561
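/*
 * One replay iteration: measure its wall-clock time, track the average and
 * fluctuation across runs, and print per-run and running-average figures.
 */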
562 static void run_one_test(void)
563 {
564         u64 T0, T1, delta, avg_delta, fluct;
565
566         T0 = get_nsecs();
567         wait_for_tasks();
568         T1 = get_nsecs();
569
570         delta = T1 - T0;
571         sum_runtime += delta;
572         nr_runs++;
573
574         avg_delta = sum_runtime / nr_runs;
575         if (delta < avg_delta)
576                 fluct = avg_delta - delta;
577         else
578                 fluct = delta - avg_delta;
579         sum_fluct += fluct;
580         if (!run_avg)
581                 run_avg = delta;
582         run_avg = (run_avg*9 + delta)/10;
583
584         printf("#%-3ld: %0.3f, ",
585                 nr_runs, (double)delta/1000000.0);
586
587         printf("ravg: %0.2f, ",
588                 (double)run_avg/1e6);
589
590         printf("cpu: %0.2f / %0.2f",
591                 (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
592
593 #if 0
594         /*
595          * rusage statistics done by the parent, these are less
596          * accurate than the sum_exec_runtime based statistics:
597          */
598         printf(" [%0.2f / %0.2f]",
599                 (double)parent_cpu_usage/1e6,
600                 (double)runavg_parent_cpu_usage/1e6);
601 #endif
602
603         printf("\n");
604
605         if (nr_sleep_corrections)
606                 printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
607         nr_sleep_corrections = 0;
608 }
609
610 static void test_calibrations(void)
611 {
612         u64 T0, T1;
613
614         T0 = get_nsecs();
615         burn_nsecs(1e6);
616         T1 = get_nsecs();
617
618         printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
619
620         T0 = get_nsecs();
621         sleep_nsecs(1e6);
622         T1 = get_nsecs();
623
624         printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
625 }
626
627 #define FILL_FIELD(ptr, field, event, data)     \
628         ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
629
630 #define FILL_ARRAY(ptr, array, event, data)                     \
631 do {                                                            \
632         void *__array = raw_field_ptr(event, #array, data);     \
633         memcpy(ptr.array, __array, sizeof(ptr.array));  \
634 } while(0)
635
636 #define FILL_COMMON_FIELDS(ptr, event, data)                    \
637 do {                                                            \
638         FILL_FIELD(ptr, common_type, event, data);              \
639         FILL_FIELD(ptr, common_flags, event, data);             \
640         FILL_FIELD(ptr, common_preempt_count, event, data);     \
641         FILL_FIELD(ptr, common_pid, event, data);               \
642         FILL_FIELD(ptr, common_tgid, event, data);              \
643 } while (0)
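
/*
 * Example expansion (sketch):
 *
 *   FILL_FIELD(wakeup_event, pid, event, data)
 *
 * becomes:
 *
 *   wakeup_event.pid = (typeof(wakeup_event.pid))
 *                      raw_field_value(event, "pid", data);
 *
 * FILL_ARRAY() likewise memcpy()s the raw field into the fixed-size member.
 */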
644
645
646
647 struct trace_switch_event {
648         u32 size;
649
650         u16 common_type;
651         u8 common_flags;
652         u8 common_preempt_count;
653         u32 common_pid;
654         u32 common_tgid;
655
656         char prev_comm[16];
657         u32 prev_pid;
658         u32 prev_prio;
659         u64 prev_state;
660         char next_comm[16];
661         u32 next_pid;
662         u32 next_prio;
663 };
664
665 struct trace_runtime_event {
666         u32 size;
667
668         u16 common_type;
669         u8 common_flags;
670         u8 common_preempt_count;
671         u32 common_pid;
672         u32 common_tgid;
673
674         char comm[16];
675         u32 pid;
676         u64 runtime;
677         u64 vruntime;
678 };
679
680 struct trace_wakeup_event {
681         u32 size;
682
683         u16 common_type;
684         u8 common_flags;
685         u8 common_preempt_count;
686         u32 common_pid;
687         u32 common_tgid;
688
689         char comm[16];
690         u32 pid;
691
692         u32 prio;
693         u32 success;
694         u32 cpu;
695 };
696
697 struct trace_fork_event {
698         u32 size;
699
700         u16 common_type;
701         u8 common_flags;
702         u8 common_preempt_count;
703         u32 common_pid;
704         u32 common_tgid;
705
706         char parent_comm[16];
707         u32 parent_pid;
708         char child_comm[16];
709         u32 child_pid;
710 };
711
712 struct trace_migrate_task_event {
713         u32 size;
714
715         u16 common_type;
716         u8 common_flags;
717         u8 common_preempt_count;
718         u32 common_pid;
719         u32 common_tgid;
720
721         char comm[16];
722         u32 pid;
723
724         u32 prio;
725         u32 cpu;
726 };
727
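/*
 * Per-subcommand callback table: replay_ops, lat_ops and map_ops below fill
 * in only the handlers they need; NULL entries are simply skipped by the
 * process_sched_*_event() dispatchers.
 */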
728 struct trace_sched_handler {
729         void (*switch_event)(struct trace_switch_event *,
730                              struct machine *,
731                              struct event_format *,
732                              struct perf_sample *sample);
733
734         void (*runtime_event)(struct trace_runtime_event *,
735                               struct machine *,
736                               struct perf_sample *sample);
737
738         void (*wakeup_event)(struct trace_wakeup_event *,
739                              struct machine *,
740                              struct event_format *,
741                              struct perf_sample *sample);
742
743         void (*fork_event)(struct trace_fork_event *,
744                            struct event_format *event);
745
746         void (*migrate_task_event)(struct trace_migrate_task_event *,
747                                    struct machine *machine,
748                                    struct perf_sample *sample);
749 };
750
751
752 static void
753 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
754                     struct machine *machine __used,
755                     struct event_format *event, struct perf_sample *sample)
756 {
757         struct task_desc *waker, *wakee;
758
759         if (verbose) {
760                 printf("sched_wakeup event %p\n", event);
761
762                 printf(" ... pid %d woke up %s/%d\n",
763                         wakeup_event->common_pid,
764                         wakeup_event->comm,
765                         wakeup_event->pid);
766         }
767
768         waker = register_pid(wakeup_event->common_pid, "<unknown>");
769         wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
770
771         add_sched_event_wakeup(waker, sample->time, wakee);
772 }
773
774 static u64 cpu_last_switched[MAX_CPUS];
775
776 static void
777 replay_switch_event(struct trace_switch_event *switch_event,
778                     struct machine *machine __used,
779                     struct event_format *event,
780                     struct perf_sample *sample)
781 {
782         struct task_desc *prev, __used *next;
783         u64 timestamp0, timestamp = sample->time;
784         int cpu = sample->cpu;
785         s64 delta;
786
787         if (verbose)
788                 printf("sched_switch event %p\n", event);
789
790         if (cpu >= MAX_CPUS || cpu < 0)
791                 return;
792
793         timestamp0 = cpu_last_switched[cpu];
794         if (timestamp0)
795                 delta = timestamp - timestamp0;
796         else
797                 delta = 0;
798
799         if (delta < 0)
800                 die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
801
802         if (verbose) {
803                 printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
804                         switch_event->prev_comm, switch_event->prev_pid,
805                         switch_event->next_comm, switch_event->next_pid,
806                         delta);
807         }
808
809         prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
810         next = register_pid(switch_event->next_pid, switch_event->next_comm);
811
812         cpu_last_switched[cpu] = timestamp;
813
814         add_sched_event_run(prev, timestamp, delta);
815         add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
816 }
817
818
819 static void
820 replay_fork_event(struct trace_fork_event *fork_event,
821                   struct event_format *event)
822 {
823         if (verbose) {
824                 printf("sched_fork event %p\n", event);
825                 printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
826                 printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
827         }
828         register_pid(fork_event->parent_pid, fork_event->parent_comm);
829         register_pid(fork_event->child_pid, fork_event->child_comm);
830 }
831
832 static struct trace_sched_handler replay_ops  = {
833         .wakeup_event           = replay_wakeup_event,
834         .switch_event           = replay_switch_event,
835         .fork_event             = replay_fork_event,
836 };
837
838 struct sort_dimension {
839         const char              *name;
840         sort_fn_t               cmp;
841         struct list_head        list;
842 };
843
844 static LIST_HEAD(cmp_pid);
845
846 static int
847 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
848 {
849         struct sort_dimension *sort;
850         int ret = 0;
851
852         BUG_ON(list_empty(list));
853
854         list_for_each_entry(sort, list, list) {
855                 ret = sort->cmp(l, r);
856                 if (ret)
857                         return ret;
858         }
859
860         return ret;
861 }
862
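/*
 * The work_atoms rb-trees are ordered by a chain of sort_dimension
 * comparators: atom_root is keyed by cmp_pid while events are processed,
 * and sort_lat() later re-inserts everything into sorted_atom_root using
 * the user-selected sort_list.
 */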
863 static struct work_atoms *
864 thread_atoms_search(struct rb_root *root, struct thread *thread,
865                          struct list_head *sort_list)
866 {
867         struct rb_node *node = root->rb_node;
868         struct work_atoms key = { .thread = thread };
869
870         while (node) {
871                 struct work_atoms *atoms;
872                 int cmp;
873
874                 atoms = container_of(node, struct work_atoms, node);
875
876                 cmp = thread_lat_cmp(sort_list, &key, atoms);
877                 if (cmp > 0)
878                         node = node->rb_left;
879                 else if (cmp < 0)
880                         node = node->rb_right;
881                 else {
882                         BUG_ON(thread != atoms->thread);
883                         return atoms;
884                 }
885         }
886         return NULL;
887 }
888
889 static void
890 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
891                          struct list_head *sort_list)
892 {
893         struct rb_node **new = &(root->rb_node), *parent = NULL;
894
895         while (*new) {
896                 struct work_atoms *this;
897                 int cmp;
898
899                 this = container_of(*new, struct work_atoms, node);
900                 parent = *new;
901
902                 cmp = thread_lat_cmp(sort_list, data, this);
903
904                 if (cmp > 0)
905                         new = &((*new)->rb_left);
906                 else
907                         new = &((*new)->rb_right);
908         }
909
910         rb_link_node(&data->node, parent, new);
911         rb_insert_color(&data->node, root);
912 }
913
914 static void thread_atoms_insert(struct thread *thread)
915 {
916         struct work_atoms *atoms = zalloc(sizeof(*atoms));
917         if (!atoms)
918                 die("No memory");
919
920         atoms->thread = thread;
921         INIT_LIST_HEAD(&atoms->work_list);
922         __thread_latency_insert(&atom_root, atoms, &cmp_pid);
923 }
924
925 static void
926 latency_fork_event(struct trace_fork_event *fork_event __used,
927                    struct event_format *event __used)
928 {
929         /* should insert the newcomer */
930 }
931
932 __used
933 static char sched_out_state(struct trace_switch_event *switch_event)
934 {
935         const char *str = TASK_STATE_TO_CHAR_STR;
936
937         return str[switch_event->prev_state];
938 }
939
940 static void
941 add_sched_out_event(struct work_atoms *atoms,
942                     char run_state,
943                     u64 timestamp)
944 {
945         struct work_atom *atom = zalloc(sizeof(*atom));
946         if (!atom)
947                 die("Non memory");
948
949         atom->sched_out_time = timestamp;
950
951         if (run_state == 'R') {
952                 atom->state = THREAD_WAIT_CPU;
953                 atom->wake_up_time = atom->sched_out_time;
954         }
955
956         list_add_tail(&atom->list, &atoms->work_list);
957 }
958
959 static void
960 add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
961 {
962         struct work_atom *atom;
963
964         BUG_ON(list_empty(&atoms->work_list));
965
966         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
967
968         atom->runtime += delta;
969         atoms->total_runtime += delta;
970 }
971
972 static void
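/*
 * A runnable task got the CPU: record its wakeup-to-sched-in latency and
 * remember the largest such delay and when it occurred.
 */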
973 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
974 {
975         struct work_atom *atom;
976         u64 delta;
977
978         if (list_empty(&atoms->work_list))
979                 return;
980
981         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
982
983         if (atom->state != THREAD_WAIT_CPU)
984                 return;
985
986         if (timestamp < atom->wake_up_time) {
987                 atom->state = THREAD_IGNORE;
988                 return;
989         }
990
991         atom->state = THREAD_SCHED_IN;
992         atom->sched_in_time = timestamp;
993
994         delta = atom->sched_in_time - atom->wake_up_time;
995         atoms->total_lat += delta;
996         if (delta > atoms->max_lat) {
997                 atoms->max_lat = delta;
998                 atoms->max_lat_at = timestamp;
999         }
1000         atoms->nb_atoms++;
1001 }
1002
1003 static void
1004 latency_switch_event(struct trace_switch_event *switch_event,
1005                      struct machine *machine,
1006                      struct event_format *event __used,
1007                      struct perf_sample *sample)
1008 {
1009         struct work_atoms *out_events, *in_events;
1010         struct thread *sched_out, *sched_in;
1011         u64 timestamp0, timestamp = sample->time;
1012         int cpu = sample->cpu;
1013         s64 delta;
1014
1015         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1016
1017         timestamp0 = cpu_last_switched[cpu];
1018         cpu_last_switched[cpu] = timestamp;
1019         if (timestamp0)
1020                 delta = timestamp - timestamp0;
1021         else
1022                 delta = 0;
1023
1024         if (delta < 0)
1025                 die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1026
1027
1028         sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
1029         sched_in = machine__findnew_thread(machine, switch_event->next_pid);
1030
1031         out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1032         if (!out_events) {
1033                 thread_atoms_insert(sched_out);
1034                 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1035                 if (!out_events)
1036                         die("out-event: Internal tree error");
1037         }
1038         add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
1039
1040         in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1041         if (!in_events) {
1042                 thread_atoms_insert(sched_in);
1043                 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1044                 if (!in_events)
1045                         die("in-event: Internal tree error");
1046                 /*
1047                  * Task came in that we have not heard about yet,
1048                  * add in an initial atom in runnable state:
1049                  */
1050                 add_sched_out_event(in_events, 'R', timestamp);
1051         }
1052         add_sched_in_event(in_events, timestamp);
1053 }
1054
1055 static void
1056 latency_runtime_event(struct trace_runtime_event *runtime_event,
1057                       struct machine *machine, struct perf_sample *sample)
1058 {
1059         struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
1060         struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1061         u64 timestamp = sample->time;
1062         int cpu = sample->cpu;
1063
1064         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1065         if (!atoms) {
1066                 thread_atoms_insert(thread);
1067                 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1068                 if (!atoms)
1069                         die("in-event: Internal tree error");
1070                 add_sched_out_event(atoms, 'R', timestamp);
1071         }
1072
1073         add_runtime_event(atoms, runtime_event->runtime, timestamp);
1074 }
1075
1076 static void
1077 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
1078                      struct machine *machine, struct event_format *event __used,
1079                      struct perf_sample *sample)
1080 {
1081         struct work_atoms *atoms;
1082         struct work_atom *atom;
1083         struct thread *wakee;
1084         u64 timestamp = sample->time;
1085
1086         /* Note for later, it may be interesting to observe the failing cases */
1087         if (!wakeup_event->success)
1088                 return;
1089
1090         wakee = machine__findnew_thread(machine, wakeup_event->pid);
1091         atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1092         if (!atoms) {
1093                 thread_atoms_insert(wakee);
1094                 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1095                 if (!atoms)
1096                         die("wakeup-event: Internal tree error");
1097                 add_sched_out_event(atoms, 'S', timestamp);
1098         }
1099
1100         BUG_ON(list_empty(&atoms->work_list));
1101
1102         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1103
1104         /*
1105          * You WILL be missing events if you've recorded only
1107          * one CPU, or are only looking at one, so don't
1107          * make useless noise.
1108          */
1109         if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1110                 nr_state_machine_bugs++;
1111
1112         nr_timestamps++;
1113         if (atom->sched_out_time > timestamp) {
1114                 nr_unordered_timestamps++;
1115                 return;
1116         }
1117
1118         atom->state = THREAD_WAIT_CPU;
1119         atom->wake_up_time = timestamp;
1120 }
1121
1122 static void
1123 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
1124                            struct machine *machine, struct perf_sample *sample)
1125 {
1126         u64 timestamp = sample->time;
1127         struct work_atoms *atoms;
1128         struct work_atom *atom;
1129         struct thread *migrant;
1130
1131         /*
1132          * Only need to worry about migration when profiling one CPU.
1133          */
1134         if (profile_cpu == -1)
1135                 return;
1136
1137         migrant = machine__findnew_thread(machine, migrate_task_event->pid);
1138         atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1139         if (!atoms) {
1140                 thread_atoms_insert(migrant);
1141                 register_pid(migrant->pid, migrant->comm);
1142                 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1143                 if (!atoms)
1144                         die("migration-event: Internal tree error");
1145                 add_sched_out_event(atoms, 'R', timestamp);
1146         }
1147
1148         BUG_ON(list_empty(&atoms->work_list));
1149
1150         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1151         atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1152
1153         nr_timestamps++;
1154
1155         if (atom->sched_out_time > timestamp)
1156                 nr_unordered_timestamps++;
1157 }
1158
1159 static struct trace_sched_handler lat_ops  = {
1160         .wakeup_event           = latency_wakeup_event,
1161         .switch_event           = latency_switch_event,
1162         .runtime_event          = latency_runtime_event,
1163         .fork_event             = latency_fork_event,
1164         .migrate_task_event     = latency_migrate_task_event,
1165 };
1166
1167 static void output_lat_thread(struct work_atoms *work_list)
1168 {
1169         int i;
1170         int ret;
1171         u64 avg;
1172
1173         if (!work_list->nb_atoms)
1174                 return;
1175         /*
1176          * Ignore idle threads:
1177          */
1178         if (!strcmp(work_list->thread->comm, "swapper"))
1179                 return;
1180
1181         all_runtime += work_list->total_runtime;
1182         all_count += work_list->nb_atoms;
1183
1184         ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);
1185
1186         for (i = 0; i < 24 - ret; i++)
1187                 printf(" ");
1188
1189         avg = work_list->total_lat / work_list->nb_atoms;
1190
1191         printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
1192               (double)work_list->total_runtime / 1e6,
1193                  work_list->nb_atoms, (double)avg / 1e6,
1194                  (double)work_list->max_lat / 1e6,
1195                  (double)work_list->max_lat_at / 1e9);
1196 }
1197
1198 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1199 {
1200         if (l->thread->pid < r->thread->pid)
1201                 return -1;
1202         if (l->thread->pid > r->thread->pid)
1203                 return 1;
1204
1205         return 0;
1206 }
1207
1208 static struct sort_dimension pid_sort_dimension = {
1209         .name                   = "pid",
1210         .cmp                    = pid_cmp,
1211 };
1212
1213 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1214 {
1215         u64 avgl, avgr;
1216
1217         if (!l->nb_atoms)
1218                 return -1;
1219
1220         if (!r->nb_atoms)
1221                 return 1;
1222
1223         avgl = l->total_lat / l->nb_atoms;
1224         avgr = r->total_lat / r->nb_atoms;
1225
1226         if (avgl < avgr)
1227                 return -1;
1228         if (avgl > avgr)
1229                 return 1;
1230
1231         return 0;
1232 }
1233
1234 static struct sort_dimension avg_sort_dimension = {
1235         .name                   = "avg",
1236         .cmp                    = avg_cmp,
1237 };
1238
1239 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1240 {
1241         if (l->max_lat < r->max_lat)
1242                 return -1;
1243         if (l->max_lat > r->max_lat)
1244                 return 1;
1245
1246         return 0;
1247 }
1248
1249 static struct sort_dimension max_sort_dimension = {
1250         .name                   = "max",
1251         .cmp                    = max_cmp,
1252 };
1253
1254 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1255 {
1256         if (l->nb_atoms < r->nb_atoms)
1257                 return -1;
1258         if (l->nb_atoms > r->nb_atoms)
1259                 return 1;
1260
1261         return 0;
1262 }
1263
1264 static struct sort_dimension switch_sort_dimension = {
1265         .name                   = "switch",
1266         .cmp                    = switch_cmp,
1267 };
1268
1269 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1270 {
1271         if (l->total_runtime < r->total_runtime)
1272                 return -1;
1273         if (l->total_runtime > r->total_runtime)
1274                 return 1;
1275
1276         return 0;
1277 }
1278
1279 static struct sort_dimension runtime_sort_dimension = {
1280         .name                   = "runtime",
1281         .cmp                    = runtime_cmp,
1282 };
1283
1284 static struct sort_dimension *available_sorts[] = {
1285         &pid_sort_dimension,
1286         &avg_sort_dimension,
1287         &max_sort_dimension,
1288         &switch_sort_dimension,
1289         &runtime_sort_dimension,
1290 };
1291
1292 #define NB_AVAILABLE_SORTS      (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
1293
1294 static LIST_HEAD(sort_list);
1295
1296 static int sort_dimension__add(const char *tok, struct list_head *list)
1297 {
1298         int i;
1299
1300         for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
1301                 if (!strcmp(available_sorts[i]->name, tok)) {
1302                         list_add_tail(&available_sorts[i]->list, list);
1303
1304                         return 0;
1305                 }
1306         }
1307
1308         return -1;
1309 }
1310
1311 static void setup_sorting(void);
1312
1313 static void sort_lat(void)
1314 {
1315         struct rb_node *node;
1316
1317         for (;;) {
1318                 struct work_atoms *data;
1319                 node = rb_first(&atom_root);
1320                 if (!node)
1321                         break;
1322
1323                 rb_erase(node, &atom_root);
1324                 data = rb_entry(node, struct work_atoms, node);
1325                 __thread_latency_insert(&sorted_atom_root, data, &sort_list);
1326         }
1327 }
1328
1329 static struct trace_sched_handler *trace_handler;
1330
1331 static void
1332 process_sched_wakeup_event(struct perf_tool *tool __used,
1333                            struct event_format *event,
1334                            struct perf_sample *sample,
1335                            struct machine *machine,
1336                            struct thread *thread __used)
1337 {
1338         void *data = sample->raw_data;
1339         struct trace_wakeup_event wakeup_event;
1340
1341         FILL_COMMON_FIELDS(wakeup_event, event, data);
1342
1343         FILL_ARRAY(wakeup_event, comm, event, data);
1344         FILL_FIELD(wakeup_event, pid, event, data);
1345         FILL_FIELD(wakeup_event, prio, event, data);
1346         FILL_FIELD(wakeup_event, success, event, data);
1347         FILL_FIELD(wakeup_event, cpu, event, data);
1348
1349         if (trace_handler->wakeup_event)
1350                 trace_handler->wakeup_event(&wakeup_event, machine, event, sample);
1351 }
1352
1353 /*
1354  * Track the current task - that way we can know whether there are any
1355  * weird events, such as a task being switched away that is not current.
1356  */
1357 static int max_cpu;
1358
1359 static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
1360
1361 static struct thread *curr_thread[MAX_CPUS];
1362
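/*
 * Two-character labels handed out to tasks as they first appear in the map
 * view: A0, B0, ... Z0, A1, B1, and so on.
 */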
1363 static char next_shortname1 = 'A';
1364 static char next_shortname2 = '0';
1365
1366 static void
1367 map_switch_event(struct trace_switch_event *switch_event,
1368                  struct machine *machine,
1369                  struct event_format *event __used,
1370                  struct perf_sample *sample)
1371 {
1372         struct thread *sched_out __used, *sched_in;
1373         int new_shortname;
1374         u64 timestamp0, timestamp = sample->time;
1375         s64 delta;
1376         int cpu, this_cpu = sample->cpu;
1377
1378         BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1379
1380         if (this_cpu > max_cpu)
1381                 max_cpu = this_cpu;
1382
1383         timestamp0 = cpu_last_switched[this_cpu];
1384         cpu_last_switched[this_cpu] = timestamp;
1385         if (timestamp0)
1386                 delta = timestamp - timestamp0;
1387         else
1388                 delta = 0;
1389
1390         if (delta < 0)
1391                 die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1392
1393
1394         sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
1395         sched_in = machine__findnew_thread(machine, switch_event->next_pid);
1396
1397         curr_thread[this_cpu] = sched_in;
1398
1399         printf("  ");
1400
1401         new_shortname = 0;
1402         if (!sched_in->shortname[0]) {
1403                 sched_in->shortname[0] = next_shortname1;
1404                 sched_in->shortname[1] = next_shortname2;
1405
1406                 if (next_shortname1 < 'Z') {
1407                         next_shortname1++;
1408                 } else {
1409                         next_shortname1='A';
1410                         if (next_shortname2 < '9') {
1411                                 next_shortname2++;
1412                         } else {
1413                                 next_shortname2='0';
1414                         }
1415                 }
1416                 new_shortname = 1;
1417         }
1418
1419         for (cpu = 0; cpu <= max_cpu; cpu++) {
1420                 if (cpu != this_cpu)
1421                         printf(" ");
1422                 else
1423                         printf("*");
1424
1425                 if (curr_thread[cpu]) {
1426                         if (curr_thread[cpu]->pid)
1427                                 printf("%2s ", curr_thread[cpu]->shortname);
1428                         else
1429                                 printf(".  ");
1430                 } else
1431                         printf("   ");
1432         }
1433
1434         printf("  %12.6f secs ", (double)timestamp/1e9);
1435         if (new_shortname) {
1436                 printf("%s => %s:%d\n",
1437                         sched_in->shortname, sched_in->comm, sched_in->pid);
1438         } else {
1439                 printf("\n");
1440         }
1441 }
1442
1443 static void
1444 process_sched_switch_event(struct perf_tool *tool __used,
1445                            struct event_format *event,
1446                            struct perf_sample *sample,
1447                            struct machine *machine,
1448                            struct thread *thread __used)
1449 {
1450         int this_cpu = sample->cpu;
1451         void *data = sample->raw_data;
1452         struct trace_switch_event switch_event;
1453
1454         FILL_COMMON_FIELDS(switch_event, event, data);
1455
1456         FILL_ARRAY(switch_event, prev_comm, event, data);
1457         FILL_FIELD(switch_event, prev_pid, event, data);
1458         FILL_FIELD(switch_event, prev_prio, event, data);
1459         FILL_FIELD(switch_event, prev_state, event, data);
1460         FILL_ARRAY(switch_event, next_comm, event, data);
1461         FILL_FIELD(switch_event, next_pid, event, data);
1462         FILL_FIELD(switch_event, next_prio, event, data);
1463
1464         if (curr_pid[this_cpu] != (u32)-1) {
1465                 /*
1466                  * Are we trying to switch away a PID that is
1467                  * not current?
1468                  */
1469                 if (curr_pid[this_cpu] != switch_event.prev_pid)
1470                         nr_context_switch_bugs++;
1471         }
1472         if (trace_handler->switch_event)
1473                 trace_handler->switch_event(&switch_event, machine, event, sample);
1474
1475         curr_pid[this_cpu] = switch_event.next_pid;
1476 }
1477
1478 static void
1479 process_sched_runtime_event(struct perf_tool *tool __used,
1480                             struct event_format *event,
1481                             struct perf_sample *sample,
1482                             struct machine *machine,
1483                             struct thread *thread __used)
1484 {
1485         void *data = sample->raw_data;
1486         struct trace_runtime_event runtime_event;
1487
1488         FILL_ARRAY(runtime_event, comm, event, data);
1489         FILL_FIELD(runtime_event, pid, event, data);
1490         FILL_FIELD(runtime_event, runtime, event, data);
1491         FILL_FIELD(runtime_event, vruntime, event, data);
1492
1493         if (trace_handler->runtime_event)
1494                 trace_handler->runtime_event(&runtime_event, machine, sample);
1495 }
1496
1497 static void
1498 process_sched_fork_event(struct perf_tool *tool __used,
1499                          struct event_format *event,
1500                          struct perf_sample *sample,
1501                          struct machine *machine __used,
1502                          struct thread *thread __used)
1503 {
1504         void *data = sample->raw_data;
1505         struct trace_fork_event fork_event;
1506
1507         FILL_COMMON_FIELDS(fork_event, event, data);
1508
1509         FILL_ARRAY(fork_event, parent_comm, event, data);
1510         FILL_FIELD(fork_event, parent_pid, event, data);
1511         FILL_ARRAY(fork_event, child_comm, event, data);
1512         FILL_FIELD(fork_event, child_pid, event, data);
1513
1514         if (trace_handler->fork_event)
1515                 trace_handler->fork_event(&fork_event, event);
1516 }
1517
1518 static void
1519 process_sched_exit_event(struct perf_tool *tool __used,
1520                          struct event_format *event,
1521                          struct perf_sample *sample __used,
1522                          struct machine *machine __used,
1523                          struct thread *thread __used)
1524 {
1525         if (verbose)
1526                 printf("sched_exit event %p\n", event);
1527 }
1528
1529 static void
1530 process_sched_migrate_task_event(struct perf_tool *tool __used,
1531                                  struct event_format *event,
1532                                  struct perf_sample *sample,
1533                                  struct machine *machine,
1534                                  struct thread *thread __used)
1535 {
1536         void *data = sample->raw_data;
1537         struct trace_migrate_task_event migrate_task_event;
1538
1539         FILL_COMMON_FIELDS(migrate_task_event, event, data);
1540
1541         FILL_ARRAY(migrate_task_event, comm, event, data);
1542         FILL_FIELD(migrate_task_event, pid, event, data);
1543         FILL_FIELD(migrate_task_event, prio, event, data);
1544         FILL_FIELD(migrate_task_event, cpu, event, data);
1545
1546         if (trace_handler->migrate_task_event)
1547                 trace_handler->migrate_task_event(&migrate_task_event, machine, sample);
1548 }
1549
1550 typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event,
1551                                    struct perf_sample *sample,
1552                                    struct machine *machine,
1553                                    struct thread *thread);
1554
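/*
 * Common sample callback: resolve the thread, account the sample in the
 * evsel's hists, then dispatch to the per-tracepoint handler installed on
 * the evsel.
 */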
1555 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used,
1556                                                  union perf_event *event __used,
1557                                                  struct perf_sample *sample,
1558                                                  struct perf_evsel *evsel,
1559                                                  struct machine *machine)
1560 {
1561         struct thread *thread = machine__findnew_thread(machine, sample->pid);
1562
1563         if (thread == NULL) {
1564                 pr_debug("problem processing %s event, skipping it.\n",
1565                          perf_evsel__name(evsel));
1566                 return -1;
1567         }
1568
1569         evsel->hists.stats.total_period += sample->period;
1570         hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
1571
1572         if (evsel->handler.func != NULL) {
1573                 tracepoint_handler f = evsel->handler.func;
1574                 f(tool, evsel->tp_format, sample, machine, thread);
1575         }
1576
1577         return 0;
1578 }
1579
1580 static struct perf_tool perf_sched = {
1581         .sample          = perf_sched__process_tracepoint_sample,
1582         .comm            = perf_event__process_comm,
1583         .lost            = perf_event__process_lost,
1584         .fork            = perf_event__process_task,
1585         .ordered_samples = true,
1586 };
1587
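/*
 * Open the input perf.data, wire each sched tracepoint to its handler and
 * process all events; lost-event statistics are collected for
 * print_bad_events().
 */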
1588 static void read_events(bool destroy, struct perf_session **psession)
1589 {
1590         int err = -EINVAL;
1591         const struct perf_evsel_str_handler handlers[] = {
1592                 { "sched:sched_switch",       process_sched_switch_event, },
1593                 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1594                 { "sched:sched_wakeup",       process_sched_wakeup_event, },
1595                 { "sched:sched_wakeup_new",   process_sched_wakeup_event, },
1596                 { "sched:sched_process_fork", process_sched_fork_event, },
1597                 { "sched:sched_process_exit", process_sched_exit_event, },
1598                 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1599         };
1600         struct perf_session *session;
1601
1602         session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched);
1603         if (session == NULL)
1604                 die("No Memory");
1605
1606         err = perf_session__set_tracepoints_handlers(session, handlers);
1607         assert(err == 0);
1608
1609         if (perf_session__has_traces(session, "record -R")) {
1610                 err = perf_session__process_events(session, &perf_sched);
1611                 if (err)
1612                         die("Failed to process events, error %d", err);
1613
1614                 nr_events      = session->hists.stats.nr_events[0];
1615                 nr_lost_events = session->hists.stats.total_lost;
1616                 nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
1617         }
1618
1619         if (destroy)
1620                 perf_session__delete(session);
1621
1622         if (psession)
1623                 *psession = session;
1624 }
1625
1626 static void print_bad_events(void)
1627 {
1628         if (nr_unordered_timestamps && nr_timestamps) {
1629                 printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1630                         (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
1631                         nr_unordered_timestamps, nr_timestamps);
1632         }
1633         if (nr_lost_events && nr_events) {
1634                 printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1635                         (double)nr_lost_events/(double)nr_events*100.0,
1636                         nr_lost_events, nr_events, nr_lost_chunks);
1637         }
1638         if (nr_state_machine_bugs && nr_timestamps) {
1639                 printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
1640                         (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
1641                         nr_state_machine_bugs, nr_timestamps);
1642                 if (nr_lost_events)
1643                         printf(" (due to lost events?)");
1644                 printf("\n");
1645         }
1646         if (nr_context_switch_bugs && nr_timestamps) {
1647                 printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
1648                         (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
1649                         nr_context_switch_bugs, nr_timestamps);
1650                 if (nr_lost_events)
1651                         printf(" (due to lost events?)");
1652                 printf("\n");
1653         }
1654 }
1655
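/*
 * 'perf sched latency': process the trace, sort the per-task work atoms by
 * the keys selected via --sort and print one row per task with its runtime,
 * switch count and average/maximum wakeup delay, followed by the totals and
 * any trace anomalies.
 */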
1656 static void __cmd_lat(void)
1657 {
1658         struct rb_node *next;
1659         struct perf_session *session;
1660
1661         setup_pager();
1662         read_events(false, &session);
1663         sort_lat();
1664
1665         printf("\n ---------------------------------------------------------------------------------------------------------------\n");
1666         printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
1667         printf(" ---------------------------------------------------------------------------------------------------------------\n");
1668
1669         next = rb_first(&sorted_atom_root);
1670
1671         while (next) {
1672                 struct work_atoms *work_list;
1673
1674                 work_list = rb_entry(next, struct work_atoms, node);
1675                 output_lat_thread(work_list);
1676                 next = rb_next(next);
1677         }
1678
1679         printf(" -----------------------------------------------------------------------------------------\n");
1680         printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
1681                 (double)all_runtime/1e6, all_count);
1682
1683         printf(" ---------------------------------------------------\n");
1684
1685         print_bad_events();
1686         printf("\n");
1687
1688         perf_session__delete(session);
1689 }
1690
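/*
 * 'perf sched map' only needs context-switch events: all other callbacks
 * stay NULL and map_switch_event() (defined earlier in this file) emits the
 * per-CPU switch map.
 */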
1691 static struct trace_sched_handler map_ops  = {
1692         .wakeup_event           = NULL,
1693         .switch_event           = map_switch_event,
1694         .runtime_event          = NULL,
1695         .fork_event             = NULL,
1696 };
1697
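/* Size the map to the number of configured CPUs, then stream the trace. */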
1698 static void __cmd_map(void)
1699 {
1700         max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1701
1702         setup_pager();
1703         read_events(true, NULL);
1704         print_bad_events();
1705 }
1706
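/*
 * 'perf sched replay': measure and sanity-check the run/sleep measurement
 * overheads, rebuild the recorded tasks and their run/sleep/wakeup atoms
 * from the trace, then spawn one thread per task and re-execute the
 * recorded schedule replay_repeat times.
 */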
1707 static void __cmd_replay(void)
1708 {
1709         unsigned long i;
1710
1711         calibrate_run_measurement_overhead();
1712         calibrate_sleep_measurement_overhead();
1713
1714         test_calibrations();
1715
1716         read_events(true, NULL);
1717
1718         printf("nr_run_events:        %lu\n", nr_run_events);
1719         printf("nr_sleep_events:      %lu\n", nr_sleep_events);
1720         printf("nr_wakeup_events:     %lu\n", nr_wakeup_events);
1721
1722         if (targetless_wakeups)
1723                 printf("target-less wakeups:  %lu\n", targetless_wakeups);
1724         if (multitarget_wakeups)
1725                 printf("multi-target wakeups: %lu\n", multitarget_wakeups);
1726         if (nr_run_events_optimized)
1727                 printf("run atoms optimized:  %lu\n",
1728                         nr_run_events_optimized);
1729
1730         print_task_traces();
1731         add_cross_task_wakeups();
1732
1733         create_tasks();
1734         printf("------------------------------------------------------------\n");
1735         for (i = 0; i < replay_repeat; i++)
1736                 run_one_test();
1737 }
1738
1739
1740 static const char * const sched_usage[] = {
1741         "perf sched [<options>] {record|latency|map|replay|script}",
1742         NULL
1743 };
1744
1745 static const struct option sched_options[] = {
1746         OPT_STRING('i', "input", &input_name, "file",
1747                     "input file name"),
1748         OPT_INCR('v', "verbose", &verbose,
1749                     "be more verbose (show symbol address, etc)"),
1750         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1751                     "dump raw trace in ASCII"),
1752         OPT_END()
1753 };
1754
1755 static const char * const latency_usage[] = {
1756         "perf sched latency [<options>]",
1757         NULL
1758 };
1759
1760 static const struct option latency_options[] = {
1761         OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1762                    "sort by key(s): runtime, switch, avg, max"),
1763         OPT_INCR('v', "verbose", &verbose,
1764                     "be more verbose (show symbol address, etc)"),
1765         OPT_INTEGER('C', "CPU", &profile_cpu,
1766                     "CPU to profile on"),
1767         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1768                     "dump raw trace in ASCII"),
1769         OPT_END()
1770 };
1771
1772 static const char * const replay_usage[] = {
1773         "perf sched replay [<options>]",
1774         NULL
1775 };
1776
1777 static const struct option replay_options[] = {
1778         OPT_UINTEGER('r', "repeat", &replay_repeat,
1779                      "repeat the workload replay N times (-1: infinite)"),
1780         OPT_INCR('v', "verbose", &verbose,
1781                     "be more verbose (show symbol address, etc)"),
1782         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1783                     "dump raw trace in ASCII"),
1784         OPT_END()
1785 };
1786
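/*
 * Turn the --sort key list (e.g. "avg,max,switch,runtime") into sort_list;
 * an unknown key prints an error and the latency usage text.  "pid" is
 * always added to the separate cmp_pid list, which is used to look tasks up
 * while the trace is being processed, independently of the output ordering.
 */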
1787 static void setup_sorting(void)
1788 {
1789         char *tmp, *tok, *str = strdup(sort_order);
1790
1791         for (tok = strtok_r(str, ", ", &tmp);
1792                         tok; tok = strtok_r(NULL, ", ", &tmp)) {
1793                 if (sort_dimension__add(tok, &sort_list) < 0) {
1794                         error("Unknown --sort key: `%s'", tok);
1795                         usage_with_options(latency_usage, latency_options);
1796                 }
1797         }
1798
1799         free(str);
1800
1801         sort_dimension__add("pid", &cmp_pid);
1802 }
1803
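/*
 * Fixed argument template for 'perf sched record': system-wide (-a), raw
 * sample records (-R), 1024-page mmap buffers, a sample period of 1, and
 * the sched tracepoints that the latency/map/replay sub-commands consume.
 * Extra user arguments are appended to this template in __cmd_record().
 */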
1804 static const char *record_args[] = {
1805         "record",
1806         "-a",
1807         "-R",
1808         "-f",
1809         "-m", "1024",
1810         "-c", "1",
1811         "-e", "sched:sched_switch",
1812         "-e", "sched:sched_stat_wait",
1813         "-e", "sched:sched_stat_sleep",
1814         "-e", "sched:sched_stat_iowait",
1815         "-e", "sched:sched_stat_runtime",
1816         "-e", "sched:sched_process_exit",
1817         "-e", "sched:sched_process_fork",
1818         "-e", "sched:sched_wakeup",
1819         "-e", "sched:sched_migrate_task",
1820 };
1821
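/*
 * Build the argv for the real 'perf record' command: the template above
 * plus everything the user passed after 'perf sched record'.  One slot is
 * subtracted from rec_argc because argv[0] (the sub-command name) is not
 * copied; the template provides its own "record" entry instead.
 */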
1822 static int __cmd_record(int argc, const char **argv)
1823 {
1824         unsigned int rec_argc, i, j;
1825         const char **rec_argv;
1826
1827         rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1828         rec_argv = calloc(rec_argc + 1, sizeof(char *));
1829
1830         if (rec_argv == NULL)
1831                 return -ENOMEM;
1832
1833         for (i = 0; i < ARRAY_SIZE(record_args); i++)
1834                 rec_argv[i] = strdup(record_args[i]);
1835
1836         for (j = 1; j < (unsigned int)argc; j++, i++)
1837                 rec_argv[i] = argv[j];
1838
1839         BUG_ON(i != rec_argc);
1840
1841         return cmd_record(rec_argc, rec_argv, NULL);
1842 }
1843
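/*
 * Entry point for 'perf sched'.  The first non-option argument selects the
 * sub-command: 'record' forwards to perf record with the sched tracepoints
 * pre-selected, 'latency' reports per-task scheduling latencies, 'map'
 * prints the per-CPU context-switch map, 'replay' re-executes the recorded
 * scheduling pattern, and 'script' is aliased to 'perf script'.
 */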
1844 int cmd_sched(int argc, const char **argv, const char *prefix __used)
1845 {
1846         argc = parse_options(argc, argv, sched_options, sched_usage,
1847                              PARSE_OPT_STOP_AT_NON_OPTION);
1848         if (!argc)
1849                 usage_with_options(sched_usage, sched_options);
1850
1851         /*
1852          * Aliased to 'perf script' for now:
1853          */
1854         if (!strcmp(argv[0], "script"))
1855                 return cmd_script(argc, argv, prefix);
1856
1857         symbol__init();
1858         if (!strncmp(argv[0], "rec", 3)) {
1859                 return __cmd_record(argc, argv);
1860         } else if (!strncmp(argv[0], "lat", 3)) {
1861                 trace_handler = &lat_ops;
1862                 if (argc > 1) {
1863                         argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1864                         if (argc)
1865                                 usage_with_options(latency_usage, latency_options);
1866                 }
1867                 setup_sorting();
1868                 __cmd_lat();
1869         } else if (!strcmp(argv[0], "map")) {
1870                 trace_handler = &map_ops;
1871                 setup_sorting();
1872                 __cmd_map();
1873         } else if (!strncmp(argv[0], "rep", 3)) {
1874                 trace_handler = &replay_ops;
1875                 if (argc > 1) {
1876                         argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1877                         if (argc)
1878                                 usage_with_options(replay_usage, replay_options);
1879                 }
1880                 __cmd_replay();
1881         } else {
1882                 usage_with_options(sched_usage, sched_options);
1883         }
1884
1885         return 0;
1886 }