perf sched: Remove die() calls
firefly-linux-kernel-4.4.55.git: tools/perf/builtin-sched.c
1 #include "builtin.h"
2 #include "perf.h"
3
4 #include "util/util.h"
5 #include "util/evlist.h"
6 #include "util/cache.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13
14 #include "util/parse-options.h"
15 #include "util/trace-event.h"
16
17 #include "util/debug.h"
18
19 #include <sys/prctl.h>
20 #include <sys/resource.h>
21
22 #include <semaphore.h>
23 #include <pthread.h>
24 #include <math.h>
25
26 static const char               *input_name;
27
28 static char                     default_sort_order[] = "avg, max, switch, runtime";
29 static const char               *sort_order = default_sort_order;
30
31 static int                      profile_cpu = -1;
32
33 #define PR_SET_NAME             15               /* Set process name */
34 #define MAX_CPUS                4096
35
36 static u64                      run_measurement_overhead;
37 static u64                      sleep_measurement_overhead;
38
39 #define COMM_LEN                20
40 #define SYM_LEN                 129
41
42 #define MAX_PID                 65536
43
44 static unsigned long            nr_tasks;
45
46 struct sched_atom;
47
48 struct task_desc {
49         unsigned long           nr;
50         unsigned long           pid;
51         char                    comm[COMM_LEN];
52
53         unsigned long           nr_events;
54         unsigned long           curr_event;
55         struct sched_atom       **atoms;
56
57         pthread_t               thread;
58         sem_t                   sleep_sem;
59
60         sem_t                   ready_for_work;
61         sem_t                   work_done_sem;
62
63         u64                     cpu_usage;
64 };
65
66 enum sched_event_type {
67         SCHED_EVENT_RUN,
68         SCHED_EVENT_SLEEP,
69         SCHED_EVENT_WAKEUP,
70         SCHED_EVENT_MIGRATION,
71 };
72
73 struct sched_atom {
74         enum sched_event_type   type;
75         int                     specific_wait;
76         u64                     timestamp;
77         u64                     duration;
78         unsigned long           nr;
79         sem_t                   *wait_sem;
80         struct task_desc        *wakee;
81 };
82
83 static struct task_desc         *pid_to_task[MAX_PID];
84
85 static struct task_desc         **tasks;
86
87 static pthread_mutex_t          start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
88 static u64                      start_time;
89
90 static pthread_mutex_t          work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
91
92 static unsigned long            nr_run_events;
93 static unsigned long            nr_sleep_events;
94 static unsigned long            nr_wakeup_events;
95
96 static unsigned long            nr_sleep_corrections;
97 static unsigned long            nr_run_events_optimized;
98
99 static unsigned long            targetless_wakeups;
100 static unsigned long            multitarget_wakeups;
101
102 static u64                      cpu_usage;
103 static u64                      runavg_cpu_usage;
104 static u64                      parent_cpu_usage;
105 static u64                      runavg_parent_cpu_usage;
106
107 static unsigned long            nr_runs;
108 static u64                      sum_runtime;
109 static u64                      sum_fluct;
110 static u64                      run_avg;
111
112 static unsigned int             replay_repeat = 10;
113 static unsigned long            nr_timestamps;
114 static unsigned long            nr_unordered_timestamps;
115 static unsigned long            nr_state_machine_bugs;
116 static unsigned long            nr_context_switch_bugs;
117 static unsigned long            nr_events;
118 static unsigned long            nr_lost_chunks;
119 static unsigned long            nr_lost_events;
120
121 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
122
123 enum thread_state {
124         THREAD_SLEEPING = 0,
125         THREAD_WAIT_CPU,
126         THREAD_SCHED_IN,
127         THREAD_IGNORE
128 };
129
130 struct work_atom {
131         struct list_head        list;
132         enum thread_state       state;
133         u64                     sched_out_time;
134         u64                     wake_up_time;
135         u64                     sched_in_time;
136         u64                     runtime;
137 };
138
139 struct work_atoms {
140         struct list_head        work_list;
141         struct thread           *thread;
142         struct rb_node          node;
143         u64                     max_lat;
144         u64                     max_lat_at;
145         u64                     total_lat;
146         u64                     nb_atoms;
147         u64                     total_runtime;
148 };
149
150 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
151
152 static struct rb_root           atom_root, sorted_atom_root;
153
154 static u64                      all_runtime;
155 static u64                      all_count;
156
157
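/* Return the current CLOCK_MONOTONIC time in nanoseconds. */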
158 static u64 get_nsecs(void)
159 {
160         struct timespec ts;
161
162         clock_gettime(CLOCK_MONOTONIC, &ts);
163
164         return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
165 }
166
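/*
 * Busy-loop for roughly @nsecs, compensating for the measured
 * overhead of the timing calls themselves.
 */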
167 static void burn_nsecs(u64 nsecs)
168 {
169         u64 T0 = get_nsecs(), T1;
170
171         do {
172                 T1 = get_nsecs();
173         } while (T1 + run_measurement_overhead < T0 + nsecs);
174 }
175
176 static void sleep_nsecs(u64 nsecs)
177 {
178         struct timespec ts;
179
180         ts.tv_nsec = nsecs % 999999999;
181         ts.tv_sec = nsecs / 999999999;
182
183         nanosleep(&ts, NULL);
184 }
185
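/*
 * Estimate the fixed cost of a zero-length burn_nsecs() call by
 * taking the minimum over ten runs; burn_nsecs() subtracts it.
 */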
186 static void calibrate_run_measurement_overhead(void)
187 {
188         u64 T0, T1, delta, min_delta = 1000000000ULL;
189         int i;
190
191         for (i = 0; i < 10; i++) {
192                 T0 = get_nsecs();
193                 burn_nsecs(0);
194                 T1 = get_nsecs();
195                 delta = T1-T0;
196                 min_delta = min(min_delta, delta);
197         }
198         run_measurement_overhead = min_delta;
199
200         printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
201 }
202
203 static void calibrate_sleep_measurement_overhead(void)
204 {
205         u64 T0, T1, delta, min_delta = 1000000000ULL;
206         int i;
207
208         for (i = 0; i < 10; i++) {
209                 T0 = get_nsecs();
210                 sleep_nsecs(10000);
211                 T1 = get_nsecs();
212                 delta = T1-T0;
213                 min_delta = min(min_delta, delta);
214         }
215         min_delta -= 10000;
216         sleep_measurement_overhead = min_delta;
217
218         printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
219 }
220
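/* Allocate a new sched_atom, append it to the task's atoms[] array and return it. */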
221 static struct sched_atom *
222 get_new_event(struct task_desc *task, u64 timestamp)
223 {
224         struct sched_atom *event = zalloc(sizeof(*event));
225         unsigned long idx = task->nr_events;
226         size_t size;
227
228         event->timestamp = timestamp;
229         event->nr = idx;
230
231         task->nr_events++;
232         size = sizeof(struct sched_atom *) * task->nr_events;
233         task->atoms = realloc(task->atoms, size);
234         BUG_ON(!task->atoms);
235
236         task->atoms[idx] = event;
237
238         return event;
239 }
240
241 static struct sched_atom *last_event(struct task_desc *task)
242 {
243         if (!task->nr_events)
244                 return NULL;
245
246         return task->atoms[task->nr_events - 1];
247 }
248
249 static void
250 add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
251 {
252         struct sched_atom *event, *curr_event = last_event(task);
253
254         /*
255          * optimize an existing RUN event by merging this one
256          * into it:
257          */
258         if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
259                 nr_run_events_optimized++;
260                 curr_event->duration += duration;
261                 return;
262         }
263
264         event = get_new_event(task, timestamp);
265
266         event->type = SCHED_EVENT_RUN;
267         event->duration = duration;
268
269         nr_run_events++;
270 }
271
272 static void
273 add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
274                        struct task_desc *wakee)
275 {
276         struct sched_atom *event, *wakee_event;
277
278         event = get_new_event(task, timestamp);
279         event->type = SCHED_EVENT_WAKEUP;
280         event->wakee = wakee;
281
282         wakee_event = last_event(wakee);
283         if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
284                 targetless_wakeups++;
285                 return;
286         }
287         if (wakee_event->wait_sem) {
288                 multitarget_wakeups++;
289                 return;
290         }
291
292         wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
293         sem_init(wakee_event->wait_sem, 0, 0);
294         wakee_event->specific_wait = 1;
295         event->wait_sem = wakee_event->wait_sem;
296
297         nr_wakeup_events++;
298 }
299
300 static void
301 add_sched_event_sleep(struct task_desc *task, u64 timestamp,
302                       u64 task_state __used)
303 {
304         struct sched_atom *event = get_new_event(task, timestamp);
305
306         event->type = SCHED_EVENT_SLEEP;
307
308         nr_sleep_events++;
309 }
310
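/*
 * Look up or create the replay descriptor for a traced PID; new tasks
 * get an initial sleep event so a later wakeup has something to target.
 */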
311 static struct task_desc *register_pid(unsigned long pid, const char *comm)
312 {
313         struct task_desc *task;
314
315         BUG_ON(pid >= MAX_PID);
316
317         task = pid_to_task[pid];
318
319         if (task)
320                 return task;
321
322         task = zalloc(sizeof(*task));
323         task->pid = pid;
324         task->nr = nr_tasks;
325         strcpy(task->comm, comm);
326         /*
327          * every task starts in sleeping state - this gets ignored
328          * if there's no wakeup pointing to this sleep state:
329          */
330         add_sched_event_sleep(task, 0, 0);
331
332         pid_to_task[pid] = task;
333         nr_tasks++;
334         tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
335         BUG_ON(!tasks);
336         tasks[task->nr] = task;
337
338         if (verbose)
339                 printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
340
341         return task;
342 }
343
344
345 static void print_task_traces(void)
346 {
347         struct task_desc *task;
348         unsigned long i;
349
350         for (i = 0; i < nr_tasks; i++) {
351                 task = tasks[i];
352                 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
353                         task->nr, task->comm, task->pid, task->nr_events);
354         }
355 }
356
357 static void add_cross_task_wakeups(void)
358 {
359         struct task_desc *task1, *task2;
360         unsigned long i, j;
361
362         for (i = 0; i < nr_tasks; i++) {
363                 task1 = tasks[i];
364                 j = i + 1;
365                 if (j == nr_tasks)
366                         j = 0;
367                 task2 = tasks[j];
368                 add_sched_event_wakeup(task1, 0, task2);
369         }
370 }
371
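/*
 * Replay one atom in the context of a worker thread: burn CPU for RUN,
 * wait on the semaphore for SLEEP, post it for WAKEUP.
 */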
372 static void
373 process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
374 {
375         int ret = 0;
376
377         switch (atom->type) {
378                 case SCHED_EVENT_RUN:
379                         burn_nsecs(atom->duration);
380                         break;
381                 case SCHED_EVENT_SLEEP:
382                         if (atom->wait_sem)
383                                 ret = sem_wait(atom->wait_sem);
384                         BUG_ON(ret);
385                         break;
386                 case SCHED_EVENT_WAKEUP:
387                         if (atom->wait_sem)
388                                 ret = sem_post(atom->wait_sem);
389                         BUG_ON(ret);
390                         break;
391                 case SCHED_EVENT_MIGRATION:
392                         break;
393                 default:
394                         BUG_ON(1);
395         }
396 }
397
398 static u64 get_cpu_usage_nsec_parent(void)
399 {
400         struct rusage ru;
401         u64 sum;
402         int err;
403
404         err = getrusage(RUSAGE_SELF, &ru);
405         BUG_ON(err);
406
407         sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
408         sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
409
410         return sum;
411 }
412
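/*
 * Open a software task-clock counter for the calling thread so each
 * replay thread can measure its own CPU time.
 */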
413 static int self_open_counters(void)
414 {
415         struct perf_event_attr attr;
416         int fd;
417
418         memset(&attr, 0, sizeof(attr));
419
420         attr.type = PERF_TYPE_SOFTWARE;
421         attr.config = PERF_COUNT_SW_TASK_CLOCK;
422
423         fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
424
425         if (fd < 0)
426                 pr_debug("Error: sys_perf_event_open() syscall returned "
427                          "with %d (%s)\n", fd, strerror(errno));
428         return fd;
429 }
430
431 static u64 get_cpu_usage_nsec_self(int fd)
432 {
433         u64 runtime;
434         int ret;
435
436         ret = read(fd, &runtime, sizeof(runtime));
437         BUG_ON(ret != sizeof(runtime));
438
439         return runtime;
440 }
441
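/*
 * Worker thread body: signal readiness, wait for the start mutex,
 * replay this task's atoms, report CPU usage and park until the
 * next round.
 */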
442 static void *thread_func(void *ctx)
443 {
444         struct task_desc *this_task = ctx;
445         u64 cpu_usage_0, cpu_usage_1;
446         unsigned long i, ret;
447         char comm2[22];
448         int fd;
449
450         sprintf(comm2, ":%s", this_task->comm);
451         prctl(PR_SET_NAME, comm2);
452         fd = self_open_counters();
453         if (fd < 0)
454                 return NULL;
455 again:
456         ret = sem_post(&this_task->ready_for_work);
457         BUG_ON(ret);
458         ret = pthread_mutex_lock(&start_work_mutex);
459         BUG_ON(ret);
460         ret = pthread_mutex_unlock(&start_work_mutex);
461         BUG_ON(ret);
462
463         cpu_usage_0 = get_cpu_usage_nsec_self(fd);
464
465         for (i = 0; i < this_task->nr_events; i++) {
466                 this_task->curr_event = i;
467                 process_sched_event(this_task, this_task->atoms[i]);
468         }
469
470         cpu_usage_1 = get_cpu_usage_nsec_self(fd);
471         this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
472         ret = sem_post(&this_task->work_done_sem);
473         BUG_ON(ret);
474
475         ret = pthread_mutex_lock(&work_done_wait_mutex);
476         BUG_ON(ret);
477         ret = pthread_mutex_unlock(&work_done_wait_mutex);
478         BUG_ON(ret);
479
480         goto again;
481 }
482
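/*
 * Spawn one worker thread per registered task. Both mutexes are taken
 * here, so the threads block until wait_for_tasks() releases them.
 */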
483 static void create_tasks(void)
484 {
485         struct task_desc *task;
486         pthread_attr_t attr;
487         unsigned long i;
488         int err;
489
490         err = pthread_attr_init(&attr);
491         BUG_ON(err);
492         err = pthread_attr_setstacksize(&attr,
493                         (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
494         BUG_ON(err);
495         err = pthread_mutex_lock(&start_work_mutex);
496         BUG_ON(err);
497         err = pthread_mutex_lock(&work_done_wait_mutex);
498         BUG_ON(err);
499         for (i = 0; i < nr_tasks; i++) {
500                 task = tasks[i];
501                 sem_init(&task->sleep_sem, 0, 0);
502                 sem_init(&task->ready_for_work, 0, 0);
503                 sem_init(&task->work_done_sem, 0, 0);
504                 task->curr_event = 0;
505                 err = pthread_create(&task->thread, &attr, thread_func, task);
506                 BUG_ON(err);
507         }
508 }
509
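/*
 * Drive one replay round: release the workers, wait for them to finish,
 * and fold their CPU usage into the running averages.
 */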
510 static void wait_for_tasks(void)
511 {
512         u64 cpu_usage_0, cpu_usage_1;
513         struct task_desc *task;
514         unsigned long i, ret;
515
516         start_time = get_nsecs();
517         cpu_usage = 0;
518         pthread_mutex_unlock(&work_done_wait_mutex);
519
520         for (i = 0; i < nr_tasks; i++) {
521                 task = tasks[i];
522                 ret = sem_wait(&task->ready_for_work);
523                 BUG_ON(ret);
524                 sem_init(&task->ready_for_work, 0, 0);
525         }
526         ret = pthread_mutex_lock(&work_done_wait_mutex);
527         BUG_ON(ret);
528
529         cpu_usage_0 = get_cpu_usage_nsec_parent();
530
531         pthread_mutex_unlock(&start_work_mutex);
532
533         for (i = 0; i < nr_tasks; i++) {
534                 task = tasks[i];
535                 ret = sem_wait(&task->work_done_sem);
536                 BUG_ON(ret);
537                 sem_init(&task->work_done_sem, 0, 0);
538                 cpu_usage += task->cpu_usage;
539                 task->cpu_usage = 0;
540         }
541
542         cpu_usage_1 = get_cpu_usage_nsec_parent();
543         if (!runavg_cpu_usage)
544                 runavg_cpu_usage = cpu_usage;
545         runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
546
547         parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
548         if (!runavg_parent_cpu_usage)
549                 runavg_parent_cpu_usage = parent_cpu_usage;
550         runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
551                                    parent_cpu_usage)/10;
552
553         ret = pthread_mutex_lock(&start_work_mutex);
554         BUG_ON(ret);
555
556         for (i = 0; i < nr_tasks; i++) {
557                 task = tasks[i];
558                 sem_init(&task->sleep_sem, 0, 0);
559                 task->curr_event = 0;
560         }
561 }
562
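/* Time a single replay round and print runtime, running average and CPU usage. */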
563 static void run_one_test(void)
564 {
565         u64 T0, T1, delta, avg_delta, fluct;
566
567         T0 = get_nsecs();
568         wait_for_tasks();
569         T1 = get_nsecs();
570
571         delta = T1 - T0;
572         sum_runtime += delta;
573         nr_runs++;
574
575         avg_delta = sum_runtime / nr_runs;
576         if (delta < avg_delta)
577                 fluct = avg_delta - delta;
578         else
579                 fluct = delta - avg_delta;
580         sum_fluct += fluct;
581         if (!run_avg)
582                 run_avg = delta;
583         run_avg = (run_avg*9 + delta)/10;
584
585         printf("#%-3ld: %0.3f, ",
586                 nr_runs, (double)delta/1000000.0);
587
588         printf("ravg: %0.2f, ",
589                 (double)run_avg/1e6);
590
591         printf("cpu: %0.2f / %0.2f",
592                 (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
593
594 #if 0
595         /*
596          * rusage statistics done by the parent, these are less
597          * accurate than the sum_exec_runtime based statistics:
598          */
599         printf(" [%0.2f / %0.2f]",
600                 (double)parent_cpu_usage/1e6,
601                 (double)runavg_parent_cpu_usage/1e6);
602 #endif
603
604         printf("\n");
605
606         if (nr_sleep_corrections)
607                 printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
608         nr_sleep_corrections = 0;
609 }
610
611 static void test_calibrations(void)
612 {
613         u64 T0, T1;
614
615         T0 = get_nsecs();
616         burn_nsecs(1e6);
617         T1 = get_nsecs();
618
619         printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
620
621         T0 = get_nsecs();
622         sleep_nsecs(1e6);
623         T1 = get_nsecs();
624
625         printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
626 }
627
628 #define FILL_FIELD(ptr, field, event, data)     \
629         ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
630
631 #define FILL_ARRAY(ptr, array, event, data)                     \
632 do {                                                            \
633         void *__array = raw_field_ptr(event, #array, data);     \
634         memcpy(ptr.array, __array, sizeof(ptr.array));  \
635 } while(0)
636
637 #define FILL_COMMON_FIELDS(ptr, event, data)                    \
638 do {                                                            \
639         FILL_FIELD(ptr, common_type, event, data);              \
640         FILL_FIELD(ptr, common_flags, event, data);             \
641         FILL_FIELD(ptr, common_preempt_count, event, data);     \
642         FILL_FIELD(ptr, common_pid, event, data);               \
643         FILL_FIELD(ptr, common_tgid, event, data);              \
644 } while (0)
645
646
647
648 struct trace_switch_event {
649         u32 size;
650
651         u16 common_type;
652         u8 common_flags;
653         u8 common_preempt_count;
654         u32 common_pid;
655         u32 common_tgid;
656
657         char prev_comm[16];
658         u32 prev_pid;
659         u32 prev_prio;
660         u64 prev_state;
661         char next_comm[16];
662         u32 next_pid;
663         u32 next_prio;
664 };
665
666 struct trace_runtime_event {
667         u32 size;
668
669         u16 common_type;
670         u8 common_flags;
671         u8 common_preempt_count;
672         u32 common_pid;
673         u32 common_tgid;
674
675         char comm[16];
676         u32 pid;
677         u64 runtime;
678         u64 vruntime;
679 };
680
681 struct trace_wakeup_event {
682         u32 size;
683
684         u16 common_type;
685         u8 common_flags;
686         u8 common_preempt_count;
687         u32 common_pid;
688         u32 common_tgid;
689
690         char comm[16];
691         u32 pid;
692
693         u32 prio;
694         u32 success;
695         u32 cpu;
696 };
697
698 struct trace_fork_event {
699         u32 size;
700
701         u16 common_type;
702         u8 common_flags;
703         u8 common_preempt_count;
704         u32 common_pid;
705         u32 common_tgid;
706
707         char parent_comm[16];
708         u32 parent_pid;
709         char child_comm[16];
710         u32 child_pid;
711 };
712
713 struct trace_migrate_task_event {
714         u32 size;
715
716         u16 common_type;
717         u8 common_flags;
718         u8 common_preempt_count;
719         u32 common_pid;
720         u32 common_tgid;
721
722         char comm[16];
723         u32 pid;
724
725         u32 prio;
726         u32 cpu;
727 };
728
729 struct trace_sched_handler {
730         int (*switch_event)(struct trace_switch_event *event,
731                             struct machine *machine,
732                             struct event_format *tp_format,
733                             struct perf_sample *sample);
734
735         int (*runtime_event)(struct trace_runtime_event *event,
736                              struct machine *machine,
737                              struct perf_sample *sample);
738
739         int (*wakeup_event)(struct trace_wakeup_event *event,
740                             struct machine *machine,
741                             struct event_format *tp_format,
742                             struct perf_sample *sample);
743
744         int (*fork_event)(struct trace_fork_event *event,
745                           struct event_format *tp_format);
746
747         int (*migrate_task_event)(struct trace_migrate_task_event *event,
748                                   struct machine *machine,
749                                   struct perf_sample *sample);
750 };
751
752
753 static int
754 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
755                     struct machine *machine __used,
756                     struct event_format *event, struct perf_sample *sample)
757 {
758         struct task_desc *waker, *wakee;
759
760         if (verbose) {
761                 printf("sched_wakeup event %p\n", event);
762
763                 printf(" ... pid %d woke up %s/%d\n",
764                         wakeup_event->common_pid,
765                         wakeup_event->comm,
766                         wakeup_event->pid);
767         }
768
769         waker = register_pid(wakeup_event->common_pid, "<unknown>");
770         wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
771
772         add_sched_event_wakeup(waker, sample->time, wakee);
773         return 0;
774 }
775
776 static u64 cpu_last_switched[MAX_CPUS];
777
778 static int
779 replay_switch_event(struct trace_switch_event *switch_event,
780                     struct machine *machine __used,
781                     struct event_format *event,
782                     struct perf_sample *sample)
783 {
784         struct task_desc *prev, __used *next;
785         u64 timestamp0, timestamp = sample->time;
786         int cpu = sample->cpu;
787         s64 delta;
788
789         if (verbose)
790                 printf("sched_switch event %p\n", event);
791
792         if (cpu >= MAX_CPUS || cpu < 0)
793                 return 0;
794
795         timestamp0 = cpu_last_switched[cpu];
796         if (timestamp0)
797                 delta = timestamp - timestamp0;
798         else
799                 delta = 0;
800
801         if (delta < 0) {
802                 pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
803                 return -1;
804         }
805
806         if (verbose) {
807                 printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
808                         switch_event->prev_comm, switch_event->prev_pid,
809                         switch_event->next_comm, switch_event->next_pid,
810                         delta);
811         }
812
813         prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
814         next = register_pid(switch_event->next_pid, switch_event->next_comm);
815
816         cpu_last_switched[cpu] = timestamp;
817
818         add_sched_event_run(prev, timestamp, delta);
819         add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
820
821         return 0;
822 }
823
824
825 static int
826 replay_fork_event(struct trace_fork_event *fork_event,
827                   struct event_format *event)
828 {
829         if (verbose) {
830                 printf("sched_fork event %p\n", event);
831                 printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
832                 printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
833         }
834         register_pid(fork_event->parent_pid, fork_event->parent_comm);
835         register_pid(fork_event->child_pid, fork_event->child_comm);
836         return 0;
837 }
838
839 static struct trace_sched_handler replay_ops  = {
840         .wakeup_event           = replay_wakeup_event,
841         .switch_event           = replay_switch_event,
842         .fork_event             = replay_fork_event,
843 };
844
845 struct sort_dimension {
846         const char              *name;
847         sort_fn_t               cmp;
848         struct list_head        list;
849 };
850
851 static LIST_HEAD(cmp_pid);
852
853 static int
854 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
855 {
856         struct sort_dimension *sort;
857         int ret = 0;
858
859         BUG_ON(list_empty(list));
860
861         list_for_each_entry(sort, list, list) {
862                 ret = sort->cmp(l, r);
863                 if (ret)
864                         return ret;
865         }
866
867         return ret;
868 }
869
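/*
 * Find the work_atoms entry for @thread in the rb-tree, comparing with
 * the sort keys in @sort_list.
 */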
870 static struct work_atoms *
871 thread_atoms_search(struct rb_root *root, struct thread *thread,
872                          struct list_head *sort_list)
873 {
874         struct rb_node *node = root->rb_node;
875         struct work_atoms key = { .thread = thread };
876
877         while (node) {
878                 struct work_atoms *atoms;
879                 int cmp;
880
881                 atoms = container_of(node, struct work_atoms, node);
882
883                 cmp = thread_lat_cmp(sort_list, &key, atoms);
884                 if (cmp > 0)
885                         node = node->rb_left;
886                 else if (cmp < 0)
887                         node = node->rb_right;
888                 else {
889                         BUG_ON(thread != atoms->thread);
890                         return atoms;
891                 }
892         }
893         return NULL;
894 }
895
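/* Insert @data into the rb-tree, ordered by the comparison functions in @sort_list. */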
896 static void
897 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
898                          struct list_head *sort_list)
899 {
900         struct rb_node **new = &(root->rb_node), *parent = NULL;
901
902         while (*new) {
903                 struct work_atoms *this;
904                 int cmp;
905
906                 this = container_of(*new, struct work_atoms, node);
907                 parent = *new;
908
909                 cmp = thread_lat_cmp(sort_list, data, this);
910
911                 if (cmp > 0)
912                         new = &((*new)->rb_left);
913                 else
914                         new = &((*new)->rb_right);
915         }
916
917         rb_link_node(&data->node, parent, new);
918         rb_insert_color(&data->node, root);
919 }
920
921 static int thread_atoms_insert(struct thread *thread)
922 {
923         struct work_atoms *atoms = zalloc(sizeof(*atoms));
924         if (!atoms) {
925                 pr_err("No memory at %s\n", __func__);
926                 return -1;
927         }
928
929         atoms->thread = thread;
930         INIT_LIST_HEAD(&atoms->work_list);
931         __thread_latency_insert(&atom_root, atoms, &cmp_pid);
932         return 0;
933 }
934
935 static int
936 latency_fork_event(struct trace_fork_event *fork_event __used,
937                    struct event_format *event __used)
938 {
939         /* should insert the newcomer */
940         return 0;
941 }
942
943 __used
944 static char sched_out_state(struct trace_switch_event *switch_event)
945 {
946         const char *str = TASK_STATE_TO_CHAR_STR;
947
948         return str[switch_event->prev_state];
949 }
950
951 static int
952 add_sched_out_event(struct work_atoms *atoms,
953                     char run_state,
954                     u64 timestamp)
955 {
956         struct work_atom *atom = zalloc(sizeof(*atom));
957         if (!atom) {
958                 pr_err("No memory at %s\n", __func__);
959                 return -1;
960         }
961
962         atom->sched_out_time = timestamp;
963
964         if (run_state == 'R') {
965                 atom->state = THREAD_WAIT_CPU;
966                 atom->wake_up_time = atom->sched_out_time;
967         }
968
969         list_add_tail(&atom->list, &atoms->work_list);
970         return 0;
971 }
972
973 static void
974 add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
975 {
976         struct work_atom *atom;
977
978         BUG_ON(list_empty(&atoms->work_list));
979
980         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
981
982         atom->runtime += delta;
983         atoms->total_runtime += delta;
984 }
985
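/*
 * Complete the latest atom of this thread: if it was waiting for a CPU,
 * account the wakeup-to-schedule latency and track the maximum.
 */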
986 static void
987 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
988 {
989         struct work_atom *atom;
990         u64 delta;
991
992         if (list_empty(&atoms->work_list))
993                 return;
994
995         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
996
997         if (atom->state != THREAD_WAIT_CPU)
998                 return;
999
1000         if (timestamp < atom->wake_up_time) {
1001                 atom->state = THREAD_IGNORE;
1002                 return;
1003         }
1004
1005         atom->state = THREAD_SCHED_IN;
1006         atom->sched_in_time = timestamp;
1007
1008         delta = atom->sched_in_time - atom->wake_up_time;
1009         atoms->total_lat += delta;
1010         if (delta > atoms->max_lat) {
1011                 atoms->max_lat = delta;
1012                 atoms->max_lat_at = timestamp;
1013         }
1014         atoms->nb_atoms++;
1015 }
1016
1017 static int
1018 latency_switch_event(struct trace_switch_event *switch_event,
1019                      struct machine *machine,
1020                      struct event_format *event __used,
1021                      struct perf_sample *sample)
1022 {
1023         struct work_atoms *out_events, *in_events;
1024         struct thread *sched_out, *sched_in;
1025         u64 timestamp0, timestamp = sample->time;
1026         int cpu = sample->cpu;
1027         s64 delta;
1028
1029         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1030
1031         timestamp0 = cpu_last_switched[cpu];
1032         cpu_last_switched[cpu] = timestamp;
1033         if (timestamp0)
1034                 delta = timestamp - timestamp0;
1035         else
1036                 delta = 0;
1037
1038         if (delta < 0) {
1039                 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1040                 return -1;
1041         }
1042
1043         sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
1044         sched_in = machine__findnew_thread(machine, switch_event->next_pid);
1045
1046         out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1047         if (!out_events) {
1048                 if (thread_atoms_insert(sched_out))
1049                         return -1;
1050                 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1051                 if (!out_events) {
1052                         pr_err("out-event: Internal tree error");
1053                         return -1;
1054                 }
1055         }
1056         if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp))
1057                 return -1;
1058
1059         in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1060         if (!in_events) {
1061                 if (thread_atoms_insert(sched_in))
1062                         return -1;
1063                 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1064                 if (!in_events) {
1065                         pr_err("in-event: Internal tree error");
1066                         return -1;
1067                 }
1068                 /*
1069                  * A task came in that we have not heard about yet;
1070                  * add an initial atom in the runnable state:
1071                  */
1072                 if (add_sched_out_event(in_events, 'R', timestamp))
1073                         return -1;
1074         }
1075         add_sched_in_event(in_events, timestamp);
1076
1077         return 0;
1078 }
1079
1080 static int
1081 latency_runtime_event(struct trace_runtime_event *runtime_event,
1082                       struct machine *machine, struct perf_sample *sample)
1083 {
1084         struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
1085         struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1086         u64 timestamp = sample->time;
1087         int cpu = sample->cpu;
1088
1089         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1090         if (!atoms) {
1091                 if (thread_atoms_insert(thread))
1092                         return -1;
1093                 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1094                 if (!atoms) {
1095                         pr_debug("in-event: Internal tree error");
1096                         return -1;
1097                 }
1098                 if (add_sched_out_event(atoms, 'R', timestamp))
1099                         return -1;
1100         }
1101
1102         add_runtime_event(atoms, runtime_event->runtime, timestamp);
1103         return 0;
1104 }
1105
1106 static int
1107 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
1108                      struct machine *machine, struct event_format *event __used,
1109                      struct perf_sample *sample)
1110 {
1111         struct work_atoms *atoms;
1112         struct work_atom *atom;
1113         struct thread *wakee;
1114         u64 timestamp = sample->time;
1115
1116         /* Note for later, it may be interesting to observe the failing cases */
1117         if (!wakeup_event->success)
1118                 return 0;
1119
1120         wakee = machine__findnew_thread(machine, wakeup_event->pid);
1121         atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1122         if (!atoms) {
1123                 if (thread_atoms_insert(wakee))
1124                         return -1;
1125                 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1126                 if (!atoms) {
1127                         pr_debug("wakeup-event: Internal tree error");
1128                         return -1;
1129                 }
1130                 if (add_sched_out_event(atoms, 'S', timestamp))
1131                         return -1;
1132         }
1133
1134         BUG_ON(list_empty(&atoms->work_list));
1135
1136         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1137
1138         /*
1139          * You WILL be missing events if you've recorded only
1140          * one CPU, or are only looking at one, so don't
1141          * make useless noise.
1142          */
1143         if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1144                 nr_state_machine_bugs++;
1145
1146         nr_timestamps++;
1147         if (atom->sched_out_time > timestamp) {
1148                 nr_unordered_timestamps++;
1149                 return 0;
1150         }
1151
1152         atom->state = THREAD_WAIT_CPU;
1153         atom->wake_up_time = timestamp;
1154         return 0;
1155 }
1156
1157 static int
1158 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
1159                            struct machine *machine, struct perf_sample *sample)
1160 {
1161         u64 timestamp = sample->time;
1162         struct work_atoms *atoms;
1163         struct work_atom *atom;
1164         struct thread *migrant;
1165
1166         /*
1167          * Only need to worry about migration when profiling one CPU.
1168          */
1169         if (profile_cpu == -1)
1170                 return 0;
1171
1172         migrant = machine__findnew_thread(machine, migrate_task_event->pid);
1173         atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1174         if (!atoms) {
1175                 if (thread_atoms_insert(migrant))
1176                         return -1;
1177                 register_pid(migrant->pid, migrant->comm);
1178                 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1179                 if (!atoms) {
1180                         pr_debug("migration-event: Internal tree error");
1181                         return -1;
1182                 }
1183                 if (add_sched_out_event(atoms, 'R', timestamp))
1184                         return -1;
1185         }
1186
1187         BUG_ON(list_empty(&atoms->work_list));
1188
1189         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1190         atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1191
1192         nr_timestamps++;
1193
1194         if (atom->sched_out_time > timestamp)
1195                 nr_unordered_timestamps++;
1196
1197         return 0;
1198 }
1199
1200 static struct trace_sched_handler lat_ops  = {
1201         .wakeup_event           = latency_wakeup_event,
1202         .switch_event           = latency_switch_event,
1203         .runtime_event          = latency_runtime_event,
1204         .fork_event             = latency_fork_event,
1205         .migrate_task_event     = latency_migrate_task_event,
1206 };
1207
1208 static void output_lat_thread(struct work_atoms *work_list)
1209 {
1210         int i;
1211         int ret;
1212         u64 avg;
1213
1214         if (!work_list->nb_atoms)
1215                 return;
1216         /*
1217          * Ignore idle threads:
1218          */
1219         if (!strcmp(work_list->thread->comm, "swapper"))
1220                 return;
1221
1222         all_runtime += work_list->total_runtime;
1223         all_count += work_list->nb_atoms;
1224
1225         ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);
1226
1227         for (i = 0; i < 24 - ret; i++)
1228                 printf(" ");
1229
1230         avg = work_list->total_lat / work_list->nb_atoms;
1231
1232         printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
1233               (double)work_list->total_runtime / 1e6,
1234                  work_list->nb_atoms, (double)avg / 1e6,
1235                  (double)work_list->max_lat / 1e6,
1236                  (double)work_list->max_lat_at / 1e9);
1237 }
1238
1239 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1240 {
1241         if (l->thread->pid < r->thread->pid)
1242                 return -1;
1243         if (l->thread->pid > r->thread->pid)
1244                 return 1;
1245
1246         return 0;
1247 }
1248
1249 static struct sort_dimension pid_sort_dimension = {
1250         .name                   = "pid",
1251         .cmp                    = pid_cmp,
1252 };
1253
1254 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1255 {
1256         u64 avgl, avgr;
1257
1258         if (!l->nb_atoms)
1259                 return -1;
1260
1261         if (!r->nb_atoms)
1262                 return 1;
1263
1264         avgl = l->total_lat / l->nb_atoms;
1265         avgr = r->total_lat / r->nb_atoms;
1266
1267         if (avgl < avgr)
1268                 return -1;
1269         if (avgl > avgr)
1270                 return 1;
1271
1272         return 0;
1273 }
1274
1275 static struct sort_dimension avg_sort_dimension = {
1276         .name                   = "avg",
1277         .cmp                    = avg_cmp,
1278 };
1279
1280 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1281 {
1282         if (l->max_lat < r->max_lat)
1283                 return -1;
1284         if (l->max_lat > r->max_lat)
1285                 return 1;
1286
1287         return 0;
1288 }
1289
1290 static struct sort_dimension max_sort_dimension = {
1291         .name                   = "max",
1292         .cmp                    = max_cmp,
1293 };
1294
1295 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1296 {
1297         if (l->nb_atoms < r->nb_atoms)
1298                 return -1;
1299         if (l->nb_atoms > r->nb_atoms)
1300                 return 1;
1301
1302         return 0;
1303 }
1304
1305 static struct sort_dimension switch_sort_dimension = {
1306         .name                   = "switch",
1307         .cmp                    = switch_cmp,
1308 };
1309
1310 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1311 {
1312         if (l->total_runtime < r->total_runtime)
1313                 return -1;
1314         if (l->total_runtime > r->total_runtime)
1315                 return 1;
1316
1317         return 0;
1318 }
1319
1320 static struct sort_dimension runtime_sort_dimension = {
1321         .name                   = "runtime",
1322         .cmp                    = runtime_cmp,
1323 };
1324
1325 static struct sort_dimension *available_sorts[] = {
1326         &pid_sort_dimension,
1327         &avg_sort_dimension,
1328         &max_sort_dimension,
1329         &switch_sort_dimension,
1330         &runtime_sort_dimension,
1331 };
1332
1333 #define NB_AVAILABLE_SORTS      (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
1334
1335 static LIST_HEAD(sort_list);
1336
1337 static int sort_dimension__add(const char *tok, struct list_head *list)
1338 {
1339         int i;
1340
1341         for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
1342                 if (!strcmp(available_sorts[i]->name, tok)) {
1343                         list_add_tail(&available_sorts[i]->list, list);
1344
1345                         return 0;
1346                 }
1347         }
1348
1349         return -1;
1350 }
1351
1352 static void setup_sorting(void);
1353
1354 static void sort_lat(void)
1355 {
1356         struct rb_node *node;
1357
1358         for (;;) {
1359                 struct work_atoms *data;
1360                 node = rb_first(&atom_root);
1361                 if (!node)
1362                         break;
1363
1364                 rb_erase(node, &atom_root);
1365                 data = rb_entry(node, struct work_atoms, node);
1366                 __thread_latency_insert(&sorted_atom_root, data, &sort_list);
1367         }
1368 }
1369
1370 static struct trace_sched_handler *trace_handler;
1371
1372 static int
1373 process_sched_wakeup_event(struct perf_tool *tool __used,
1374                            struct event_format *event,
1375                            struct perf_sample *sample,
1376                            struct machine *machine,
1377                            struct thread *thread __used)
1378 {
1379         void *data = sample->raw_data;
1380         struct trace_wakeup_event wakeup_event;
1381         int err = 0;
1382
1383         FILL_COMMON_FIELDS(wakeup_event, event, data);
1384
1385         FILL_ARRAY(wakeup_event, comm, event, data);
1386         FILL_FIELD(wakeup_event, pid, event, data);
1387         FILL_FIELD(wakeup_event, prio, event, data);
1388         FILL_FIELD(wakeup_event, success, event, data);
1389         FILL_FIELD(wakeup_event, cpu, event, data);
1390
1391         if (trace_handler->wakeup_event)
1392                 err = trace_handler->wakeup_event(&wakeup_event, machine, event, sample);
1393
1394         return err;
1395 }
1396
1397 /*
1398  * Track the current task - that way we can know whether there are any
1399  * weird events, such as a task being switched away that is not current.
1400  */
1401 static int max_cpu;
1402
1403 static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
1404
1405 static struct thread *curr_thread[MAX_CPUS];
1406
1407 static char next_shortname1 = 'A';
1408 static char next_shortname2 = '0';
1409
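/*
 * 'perf sched map' output: give every new thread a two-character
 * shortname and print one row showing what runs on each CPU at this
 * context switch.
 */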
1410 static int
1411 map_switch_event(struct trace_switch_event *switch_event,
1412                  struct machine *machine,
1413                  struct event_format *event __used,
1414                  struct perf_sample *sample)
1415 {
1416         struct thread *sched_out __used, *sched_in;
1417         int new_shortname;
1418         u64 timestamp0, timestamp = sample->time;
1419         s64 delta;
1420         int cpu, this_cpu = sample->cpu;
1421
1422         BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1423
1424         if (this_cpu > max_cpu)
1425                 max_cpu = this_cpu;
1426
1427         timestamp0 = cpu_last_switched[this_cpu];
1428         cpu_last_switched[this_cpu] = timestamp;
1429         if (timestamp0)
1430                 delta = timestamp - timestamp0;
1431         else
1432                 delta = 0;
1433
1434         if (delta < 0) {
1435                 pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1436                 return -1;
1437         }
1438
1439         sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
1440         sched_in = machine__findnew_thread(machine, switch_event->next_pid);
1441
1442         curr_thread[this_cpu] = sched_in;
1443
1444         printf("  ");
1445
1446         new_shortname = 0;
1447         if (!sched_in->shortname[0]) {
1448                 sched_in->shortname[0] = next_shortname1;
1449                 sched_in->shortname[1] = next_shortname2;
1450
1451                 if (next_shortname1 < 'Z') {
1452                         next_shortname1++;
1453                 } else {
1454                         next_shortname1='A';
1455                         if (next_shortname2 < '9') {
1456                                 next_shortname2++;
1457                         } else {
1458                                 next_shortname2='0';
1459                         }
1460                 }
1461                 new_shortname = 1;
1462         }
1463
1464         for (cpu = 0; cpu <= max_cpu; cpu++) {
1465                 if (cpu != this_cpu)
1466                         printf(" ");
1467                 else
1468                         printf("*");
1469
1470                 if (curr_thread[cpu]) {
1471                         if (curr_thread[cpu]->pid)
1472                                 printf("%2s ", curr_thread[cpu]->shortname);
1473                         else
1474                                 printf(".  ");
1475                 } else
1476                         printf("   ");
1477         }
1478
1479         printf("  %12.6f secs ", (double)timestamp/1e9);
1480         if (new_shortname) {
1481                 printf("%s => %s:%d\n",
1482                         sched_in->shortname, sched_in->comm, sched_in->pid);
1483         } else {
1484                 printf("\n");
1485         }
1486
1487         return 0;
1488 }
1489
1490 static int
1491 process_sched_switch_event(struct perf_tool *tool __used,
1492                            struct event_format *event,
1493                            struct perf_sample *sample,
1494                            struct machine *machine,
1495                            struct thread *thread __used)
1496 {
1497         int this_cpu = sample->cpu, err = 0;
1498         void *data = sample->raw_data;
1499         struct trace_switch_event switch_event;
1500
1501         FILL_COMMON_FIELDS(switch_event, event, data);
1502
1503         FILL_ARRAY(switch_event, prev_comm, event, data);
1504         FILL_FIELD(switch_event, prev_pid, event, data);
1505         FILL_FIELD(switch_event, prev_prio, event, data);
1506         FILL_FIELD(switch_event, prev_state, event, data);
1507         FILL_ARRAY(switch_event, next_comm, event, data);
1508         FILL_FIELD(switch_event, next_pid, event, data);
1509         FILL_FIELD(switch_event, next_prio, event, data);
1510
1511         if (curr_pid[this_cpu] != (u32)-1) {
1512                 /*
1513                  * Are we trying to switch away a PID that is
1514                  * not current?
1515                  */
1516                 if (curr_pid[this_cpu] != switch_event.prev_pid)
1517                         nr_context_switch_bugs++;
1518         }
1519         if (trace_handler->switch_event)
1520                 err = trace_handler->switch_event(&switch_event, machine, event, sample);
1521
1522         curr_pid[this_cpu] = switch_event.next_pid;
1523         return err;
1524 }
1525
1526 static int
1527 process_sched_runtime_event(struct perf_tool *tool __used,
1528                             struct event_format *event,
1529                             struct perf_sample *sample,
1530                             struct machine *machine,
1531                             struct thread *thread __used)
1532 {
1533         void *data = sample->raw_data;
1534         struct trace_runtime_event runtime_event;
1535         int err = 0;
1536
1537         FILL_ARRAY(runtime_event, comm, event, data);
1538         FILL_FIELD(runtime_event, pid, event, data);
1539         FILL_FIELD(runtime_event, runtime, event, data);
1540         FILL_FIELD(runtime_event, vruntime, event, data);
1541
1542         if (trace_handler->runtime_event)
1543                 err = trace_handler->runtime_event(&runtime_event, machine, sample);
1544
1545         return err;
1546 }
1547
1548 static int
1549 process_sched_fork_event(struct perf_tool *tool __used,
1550                          struct event_format *event,
1551                          struct perf_sample *sample,
1552                          struct machine *machine __used,
1553                          struct thread *thread __used)
1554 {
1555         void *data = sample->raw_data;
1556         struct trace_fork_event fork_event;
1557         int err = 0;
1558
1559         FILL_COMMON_FIELDS(fork_event, event, data);
1560
1561         FILL_ARRAY(fork_event, parent_comm, event, data);
1562         FILL_FIELD(fork_event, parent_pid, event, data);
1563         FILL_ARRAY(fork_event, child_comm, event, data);
1564         FILL_FIELD(fork_event, child_pid, event, data);
1565
1566         if (trace_handler->fork_event)
1567                 err = trace_handler->fork_event(&fork_event, event);
1568
1569         return err;
1570 }
1571
1572 static int
1573 process_sched_exit_event(struct perf_tool *tool __used,
1574                          struct event_format *event,
1575                          struct perf_sample *sample __used,
1576                          struct machine *machine __used,
1577                          struct thread *thread __used)
1578 {
1579         if (verbose)
1580                 printf("sched_exit event %p\n", event);
1581
1582         return 0;
1583 }
1584
1585 static int
1586 process_sched_migrate_task_event(struct perf_tool *tool __used,
1587                                  struct event_format *event,
1588                                  struct perf_sample *sample,
1589                                  struct machine *machine,
1590                                  struct thread *thread __used)
1591 {
1592         void *data = sample->raw_data;
1593         struct trace_migrate_task_event migrate_task_event;
1594         int err = 0;
1595
1596         FILL_COMMON_FIELDS(migrate_task_event, event, data);
1597
1598         FILL_ARRAY(migrate_task_event, comm, event, data);
1599         FILL_FIELD(migrate_task_event, pid, event, data);
1600         FILL_FIELD(migrate_task_event, prio, event, data);
1601         FILL_FIELD(migrate_task_event, cpu, event, data);
1602
1603         if (trace_handler->migrate_task_event)
1604                 err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample);
1605
1606         return err;
1607 }
1608
1609 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1610                                   struct event_format *tp_format,
1611                                   struct perf_sample *sample,
1612                                   struct machine *machine,
1613                                   struct thread *thread);
1614
1615 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used,
1616                                                  union perf_event *event __used,
1617                                                  struct perf_sample *sample,
1618                                                  struct perf_evsel *evsel,
1619                                                  struct machine *machine)
1620 {
1621         struct thread *thread = machine__findnew_thread(machine, sample->pid);
1622         int err = 0;
1623
1624         if (thread == NULL) {
1625                 pr_debug("problem processing %s event, skipping it.\n",
1626                          perf_evsel__name(evsel));
1627                 return -1;
1628         }
1629
1630         evsel->hists.stats.total_period += sample->period;
1631         hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
1632
1633         if (evsel->handler.func != NULL) {
1634                 tracepoint_handler f = evsel->handler.func;
1635                 err = f(tool, evsel->tp_format, sample, machine, thread);
1636         }
1637
1638         return err;
1639 }
1640
1641 static struct perf_tool perf_sched = {
1642         .sample          = perf_sched__process_tracepoint_sample,
1643         .comm            = perf_event__process_comm,
1644         .lost            = perf_event__process_lost,
1645         .fork            = perf_event__process_task,
1646         .ordered_samples = true,
1647 };
1648
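/*
 * Open the recorded perf session, register the sched tracepoint handlers
 * and process all events; optionally hand the session back to the caller.
 */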
1649 static int read_events(bool destroy, struct perf_session **psession)
1650 {
1651         const struct perf_evsel_str_handler handlers[] = {
1652                 { "sched:sched_switch",       process_sched_switch_event, },
1653                 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1654                 { "sched:sched_wakeup",       process_sched_wakeup_event, },
1655                 { "sched:sched_wakeup_new",   process_sched_wakeup_event, },
1656                 { "sched:sched_process_fork", process_sched_fork_event, },
1657                 { "sched:sched_process_exit", process_sched_exit_event, },
1658                 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1659         };
1660         struct perf_session *session;
1661
1662         session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched);
1663         if (session == NULL) {
1664                 pr_debug("No memory for session\n");
1665                 return -1;
1666         }
1667
1668         if (perf_session__set_tracepoints_handlers(session, handlers))
1669                 goto out_delete;
1670
1671         if (perf_session__has_traces(session, "record -R")) {
1672                 int err = perf_session__process_events(session, &perf_sched);
1673                 if (err) {
1674                         pr_err("Failed to process events, error %d", err);
1675                         goto out_delete;
1676                 }
1677
1678                 nr_events      = session->hists.stats.nr_events[0];
1679                 nr_lost_events = session->hists.stats.total_lost;
1680                 nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
1681         }
1682
1683         if (destroy)
1684                 perf_session__delete(session);
1685
1686         if (psession)
1687                 *psession = session;
1688
1689         return 0;
1690
1691 out_delete:
1692         perf_session__delete(session);
1693         return -1;
1694 }
1695
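/*
 * Report trace-quality problems seen while processing: unordered
 * timestamps, lost events, and suspected state machine / context
 * switch bugs, which are often just fallout from lost events.
 */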
1696 static void print_bad_events(void)
1697 {
1698         if (nr_unordered_timestamps && nr_timestamps) {
1699                 printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1700                         (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
1701                         nr_unordered_timestamps, nr_timestamps);
1702         }
1703         if (nr_lost_events && nr_events) {
1704                 printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1705                         (double)nr_lost_events/(double)nr_events*100.0,
1706                         nr_lost_events, nr_events, nr_lost_chunks);
1707         }
1708         if (nr_state_machine_bugs && nr_timestamps) {
1709                 printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
1710                         (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
1711                         nr_state_machine_bugs, nr_timestamps);
1712                 if (nr_lost_events)
1713                         printf(" (due to lost events?)");
1714                 printf("\n");
1715         }
1716         if (nr_context_switch_bugs && nr_timestamps) {
1717                 printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
1718                         (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
1719                         nr_context_switch_bugs, nr_timestamps);
1720                 if (nr_lost_events)
1721                         printf(" (due to lost events?)");
1722                 printf("\n");
1723         }
1724 }
1725
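/*
 * 'perf sched latency': read the trace, sort the per-task work atoms
 * according to --sort and print one line per task plus the totals.
 */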
1726 static int __cmd_lat(void)
1727 {
1728         struct rb_node *next;
1729         struct perf_session *session;
1730
1731         setup_pager();
1732         if (read_events(false, &session))
1733                 return -1;
1734         sort_lat();
1735
1736         printf("\n ---------------------------------------------------------------------------------------------------------------\n");
1737         printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
1738         printf(" ---------------------------------------------------------------------------------------------------------------\n");
1739
1740         next = rb_first(&sorted_atom_root);
1741
1742         while (next) {
1743                 struct work_atoms *work_list;
1744
1745                 work_list = rb_entry(next, struct work_atoms, node);
1746                 output_lat_thread(work_list);
1747                 next = rb_next(next);
1748         }
1749
1750         printf(" -----------------------------------------------------------------------------------------\n");
1751         printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
1752                 (double)all_runtime/1e6, all_count);
1753
1754         printf(" ---------------------------------------------------\n");
1755
1756         print_bad_events();
1757         printf("\n");
1758
1759         perf_session__delete(session);
1760         return 0;
1761 }
1762
1763 static struct trace_sched_handler map_ops  = {
1764         .wakeup_event           = NULL,
1765         .switch_event           = map_switch_event,
1766         .runtime_event          = NULL,
1767         .fork_event             = NULL,
1768 };
1769
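/*
 * 'perf sched map': only sched_switch events are of interest here
 * (map_ops leaves every other callback NULL); print the resulting
 * per-CPU context-switch map.
 */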
1770 static int __cmd_map(void)
1771 {
1772         max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1773
1774         setup_pager();
1775         if (read_events(true, NULL))
1776                 return -1;
1777         print_bad_events();
1778         return 0;
1779 }
1780
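/*
 * 'perf sched replay': calibrate the run/sleep measurement overhead,
 * load the recorded events, re-create the recorded tasks and replay
 * the workload replay_repeat times.
 */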
1781 static int __cmd_replay(void)
1782 {
1783         unsigned long i;
1784
1785         calibrate_run_measurement_overhead();
1786         calibrate_sleep_measurement_overhead();
1787
1788         test_calibrations();
1789
1790         if (read_events(true, NULL))
1791                 return -1;
1792
1793         printf("nr_run_events:        %ld\n", nr_run_events);
1794         printf("nr_sleep_events:      %ld\n", nr_sleep_events);
1795         printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);
1796
1797         if (targetless_wakeups)
1798                 printf("target-less wakeups:  %ld\n", targetless_wakeups);
1799         if (multitarget_wakeups)
1800                 printf("multi-target wakeups: %ld\n", multitarget_wakeups);
1801         if (nr_run_events_optimized)
1802                 printf("run atoms optimized: %ld\n",
1803                         nr_run_events_optimized);
1804
1805         print_task_traces();
1806         add_cross_task_wakeups();
1807
1808         create_tasks();
1809         printf("------------------------------------------------------------\n");
1810         for (i = 0; i < replay_repeat; i++)
1811                 run_one_test();
1812
1813         return 0;
1814 }
1815
1816
1817 static const char * const sched_usage[] = {
1818         "perf sched [<options>] {record|latency|map|replay|script}",
1819         NULL
1820 };
1821
1822 static const struct option sched_options[] = {
1823         OPT_STRING('i', "input", &input_name, "file",
1824                     "input file name"),
1825         OPT_INCR('v', "verbose", &verbose,
1826                     "be more verbose (show symbol address, etc)"),
1827         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1828                     "dump raw trace in ASCII"),
1829         OPT_END()
1830 };
1831
1832 static const char * const latency_usage[] = {
1833         "perf sched latency [<options>]",
1834         NULL
1835 };
1836
1837 static const struct option latency_options[] = {
1838         OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1839                    "sort by key(s): runtime, switch, avg, max"),
1840         OPT_INCR('v', "verbose", &verbose,
1841                     "be more verbose (show symbol address, etc)"),
1842         OPT_INTEGER('C', "CPU", &profile_cpu,
1843                     "CPU to profile on"),
1844         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1845                     "dump raw trace in ASCII"),
1846         OPT_END()
1847 };
1848
1849 static const char * const replay_usage[] = {
1850         "perf sched replay [<options>]",
1851         NULL
1852 };
1853
1854 static const struct option replay_options[] = {
1855         OPT_UINTEGER('r', "repeat", &replay_repeat,
1856                      "repeat the workload replay N times (-1: infinite)"),
1857         OPT_INCR('v', "verbose", &verbose,
1858                     "be more verbose (show symbol address, etc)"),
1859         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1860                     "dump raw trace in ASCII"),
1861         OPT_END()
1862 };
1863
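/*
 * Parse the --sort key list into sort_list; an unknown key prints the
 * latency usage and exits.  "pid" is always appended to the cmp_pid
 * list afterwards.
 */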
1864 static void setup_sorting(void)
1865 {
1866         char *tmp, *tok, *str = strdup(sort_order);
1867
1868         for (tok = strtok_r(str, ", ", &tmp);
1869                         tok; tok = strtok_r(NULL, ", ", &tmp)) {
1870                 if (sort_dimension__add(tok, &sort_list) < 0) {
1871                         error("Unknown --sort key: `%s'", tok);
1872                         usage_with_options(latency_usage, latency_options);
1873                 }
1874         }
1875
1876         free(str);
1877
1878         sort_dimension__add("pid", &cmp_pid);
1879 }
1880
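/*
 * Argument template for 'perf sched record'; the user's extra
 * arguments are appended to it in __cmd_record().
 */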
1881 static const char *record_args[] = {
1882         "record",
1883         "-a",
1884         "-R",
1885         "-f",
1886         "-m", "1024",
1887         "-c", "1",
1888         "-e", "sched:sched_switch",
1889         "-e", "sched:sched_stat_wait",
1890         "-e", "sched:sched_stat_sleep",
1891         "-e", "sched:sched_stat_iowait",
1892         "-e", "sched:sched_stat_runtime",
1893         "-e", "sched:sched_process_exit",
1894         "-e", "sched:sched_process_fork",
1895         "-e", "sched:sched_wakeup",
1896         "-e", "sched:sched_migrate_task",
1897 };
1898
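/*
 * Build an argv from record_args plus whatever the user passed after
 * 'perf sched record' and hand it to cmd_record().
 */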
1899 static int __cmd_record(int argc, const char **argv)
1900 {
1901         unsigned int rec_argc, i, j;
1902         const char **rec_argv;
1903
1904         rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1905         rec_argv = calloc(rec_argc + 1, sizeof(char *));
1906
1907         if (rec_argv == NULL)
1908                 return -ENOMEM;
1909
1910         for (i = 0; i < ARRAY_SIZE(record_args); i++)
1911                 rec_argv[i] = strdup(record_args[i]);
1912
1913         for (j = 1; j < (unsigned int)argc; j++, i++)
1914                 rec_argv[i] = argv[j];
1915
1916         BUG_ON(i != rec_argc);
1917
1918         return cmd_record(i, rec_argv, NULL);
1919 }
1920
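/*
 * Command entry point: dispatch to record/latency/map/replay based on
 * the first non-option argument; 'script' is forwarded to 'perf script'.
 */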
1921 int cmd_sched(int argc, const char **argv, const char *prefix __used)
1922 {
1923         argc = parse_options(argc, argv, sched_options, sched_usage,
1924                              PARSE_OPT_STOP_AT_NON_OPTION);
1925         if (!argc)
1926                 usage_with_options(sched_usage, sched_options);
1927
1928         /*
1929          * Aliased to 'perf script' for now:
1930          */
1931         if (!strcmp(argv[0], "script"))
1932                 return cmd_script(argc, argv, prefix);
1933
1934         symbol__init();
1935         if (!strncmp(argv[0], "rec", 3)) {
1936                 return __cmd_record(argc, argv);
1937         } else if (!strncmp(argv[0], "lat", 3)) {
1938                 trace_handler = &lat_ops;
1939                 if (argc > 1) {
1940                         argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1941                         if (argc)
1942                                 usage_with_options(latency_usage, latency_options);
1943                 }
1944                 setup_sorting();
1945                 return __cmd_lat();
1946         } else if (!strcmp(argv[0], "map")) {
1947                 trace_handler = &map_ops;
1948                 setup_sorting();
1949                 return __cmd_map();
1950         } else if (!strncmp(argv[0], "rep", 3)) {
1951                 trace_handler = &replay_ops;
1952                 if (argc) {
1953                         argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1954                         if (argc)
1955                                 usage_with_options(replay_usage, replay_options);
1956                 }
1957                 return __cmd_replay();
1958         } else {
1959                 usage_with_options(sched_usage, sched_options);
1960         }
1961
1962         return 0;
1963 }