#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>
static const char		*input_name;

static char			default_sort_order[] = "avg, max, switch, runtime";
static const char		*sort_order = default_sort_order;

static int			profile_cpu = -1;

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

#define COMM_LEN		20

#define MAX_PID			65536

static unsigned long		nr_tasks;

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;
	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};
enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;
static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;
static u64			run_avg;

static unsigned int		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_context_switch_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
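
/*
 * A minimal usage sketch (illustrative only, not called by the tool):
 * the calibrated overheads above let burn_nsecs() stop early by
 * run_measurement_overhead nsecs, so a timed burn comes out close to
 * the requested duration.
 */
#if 0
static void example_timed_burn(void)
{
	u64 t0 = get_nsecs();

	burn_nsecs(1000000);	/* busy-loop for ~1 ms */
	printf("burned %" PRIu64 " nsecs\n", get_nsecs() - t0);
}
#endif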
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}
static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}
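
/*
 * The semaphore wired up above is the replay-time handshake: the
 * waker's WAKEUP atom sem_post()s it and the wakee's SLEEP atom
 * sem_wait()s on it in process_sched_event(), which reproduces the
 * recorded wakeup ordering between the replay threads.
 */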
static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}
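
/*
 * A small usage sketch (hypothetical PID, not part of the tool):
 * register_pid() is idempotent, so event handlers may call it
 * unconditionally and always get the cached task back.
 */
#if 0
static void example_register(void)
{
	struct task_desc *t1 = register_pid(1234, "bash");
	struct task_desc *t2 = register_pid(1234, "bash");

	BUG_ON(t1 != t2);	/* same PID, same cached task_desc */
}
#endif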
static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}
static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		pr_debug("Error: sys_perf_event_open() syscall returned "
			 "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}
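
/*
 * The fd read above is the PERF_COUNT_SW_TASK_CLOCK counter opened in
 * self_open_counters(); its u64 value is this thread's CPU time in
 * nanoseconds, so two reads bracketing the event loop in thread_func()
 * give the per-task self CPU usage.
 */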
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
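
/*
 * The lock/unlock pairs above form a barrier with the parent:
 * create_tasks() takes both mutexes before any worker starts, each
 * worker posts ready_for_work and then parks on start_work_mutex,
 * and wait_for_tasks() drops that mutex to release all workers at
 * once, then reclaims them via work_done_sem/work_done_wait_mutex.
 */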
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;
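
	/*
	 * Worked example for the 90/10 running averages above: with
	 * runavg_cpu_usage == 900 and a new cpu_usage of 1000, the next
	 * value is (900*9 + 1000)/10 == 910, i.e. one iteration moves
	 * the average a tenth of the way toward the new sample.
	 */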
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}
static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)
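
/*
 * For reference, an invocation like:
 *
 *	FILL_FIELD(wakeup_event, pid, event, data);
 *
 * expands to:
 *
 *	wakeup_event.pid = (typeof(wakeup_event.pid))
 *			raw_field_value(event, "pid", data);
 *
 * i.e. the member is looked up by name in the tracepoint format data
 * and cast to the struct field's type.
 */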
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 cpu;
};
struct trace_sched_handler {
	int (*switch_event)(struct trace_switch_event *event,
			    struct machine *machine,
			    struct event_format *tp_format,
			    struct perf_sample *sample);

	int (*runtime_event)(struct trace_runtime_event *event,
			     struct machine *machine,
			     struct perf_sample *sample);

	int (*wakeup_event)(struct trace_wakeup_event *event,
			    struct machine *machine,
			    struct event_format *tp_format,
			    struct perf_sample *sample);

	int (*fork_event)(struct trace_fork_event *event,
			  struct event_format *tp_format);

	int (*migrate_task_event)(struct trace_migrate_task_event *event,
				  struct machine *machine,
				  struct perf_sample *sample);
};
static int
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct machine *machine __used,
		    struct event_format *event, struct perf_sample *sample)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, sample->time, wakee);
	return 0;
}

static u64 cpu_last_switched[MAX_CPUS];
static int
replay_switch_event(struct trace_switch_event *switch_event,
		    struct machine *machine __used,
		    struct event_format *event,
		    struct perf_sample *sample)
{
	struct task_desc *prev, __used *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);

	return 0;
}
static int
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event_format *event)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
	return 0;
}

static struct trace_sched_handler replay_ops  = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
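
/*
 * Both rbtrees are ordered by thread_lat_cmp() over a key list:
 * during collection atom_root is keyed by pid (cmp_pid), and
 * sort_lat() later re-inserts every node into sorted_atom_root using
 * the user-selected sort_list keys.
 */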
static int thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
	return 0;
}

static int
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event_format *event __used)
{
	/* should insert the newcomer */
	return 0;
}
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}
static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}
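
/*
 * Worked example (hypothetical timestamps): wake_up_time == 1000 and
 * sched_in_time == 1500 add a 500 nsec scheduling delay to total_lat,
 * and update max_lat/max_lat_at if 500 nsecs is a new maximum.
 */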
static int
latency_switch_event(struct trace_switch_event *switch_event,
		     struct machine *machine,
		     struct event_format *event __used,
		     struct perf_sample *sample)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched_out))
			return -1;
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			return -1;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp))
		return -1;

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched_in))
			return -1;
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		/*
		 * Task came in that we have not heard about yet,
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			return -1;
	}
	add_sched_in_event(in_events, timestamp);

	return 0;
}
static int
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct machine *machine, struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(thread))
			return -1;
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms) {
			pr_debug("in-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
	return 0;
}
static int
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct machine *machine, struct event_format *event __used,
		     struct perf_sample *sample)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return 0;

	wakee = machine__findnew_thread(machine, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(wakee))
			return -1;
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms) {
			pr_debug("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return 0;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
	return 0;
}
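
/*
 * This is the SLEEPING -> WAIT_CPU edge of the atom state machine:
 * the wakeup stamps wake_up_time, the next sched-in on this thread
 * stamps sched_in_time, and add_sched_in_event() charges the
 * difference as scheduling latency.
 */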
static int
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct machine *machine, struct perf_sample *sample)
{
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(migrant))
			return -1;
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms) {
			pr_debug("migration-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;

	return 0;
}
static struct trace_sched_handler lat_ops  = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};
static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
		 (double)work_list->total_runtime / 1e6,
		 work_list->nb_atoms, (double)avg / 1e6,
		 (double)work_list->max_lat / 1e6,
		 (double)work_list->max_lat_at / 1e9);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}
static struct trace_sched_handler *trace_handler;

static int
process_sched_wakeup_event(struct perf_tool *tool __used,
			   struct event_format *event,
			   struct perf_sample *sample,
			   struct machine *machine,
			   struct thread *thread __used)
{
	void *data = sample->raw_data;
	struct trace_wakeup_event wakeup_event;
	int err = 0;

	FILL_COMMON_FIELDS(wakeup_event, event, data);

	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		err = trace_handler->wakeup_event(&wakeup_event, machine, event, sample);

	return err;
}
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
static int
map_switch_event(struct trace_switch_event *switch_event,
		 struct machine *machine,
		 struct event_format *event __used,
		 struct perf_sample *sample)
{
	struct thread *sched_out __used, *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int cpu, this_cpu = sample->cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1 = 'A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}
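
	/*
	 * The shortnames count 'A0', 'B0' ... 'Z0', then wrap the
	 * letter and bump the digit ('A1' ...), so 26 * 10 == 260
	 * distinct map labels are available before they repeat.
	 */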
	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}

	return 0;
}
static int
process_sched_switch_event(struct perf_tool *tool __used,
			   struct event_format *event,
			   struct perf_sample *sample,
			   struct machine *machine,
			   struct thread *thread __used)
{
	int this_cpu = sample->cpu, err = 0;
	void *data = sample->raw_data;
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);

	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		err = trace_handler->switch_event(&switch_event, machine, event, sample);

	curr_pid[this_cpu] = switch_event.next_pid;
	return err;
}
static int
process_sched_runtime_event(struct perf_tool *tool __used,
			    struct event_format *event,
			    struct perf_sample *sample,
			    struct machine *machine,
			    struct thread *thread __used)
{
	void *data = sample->raw_data;
	struct trace_runtime_event runtime_event;
	int err = 0;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		err = trace_handler->runtime_event(&runtime_event, machine, sample);

	return err;
}
static int
process_sched_fork_event(struct perf_tool *tool __used,
			 struct event_format *event,
			 struct perf_sample *sample,
			 struct machine *machine __used,
			 struct thread *thread __used)
{
	void *data = sample->raw_data;
	struct trace_fork_event fork_event;
	int err = 0;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		err = trace_handler->fork_event(&fork_event, event);

	return err;
}
static int
process_sched_exit_event(struct perf_tool *tool __used,
			 struct event_format *event,
			 struct perf_sample *sample __used,
			 struct machine *machine __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);

	return 0;
}
static int
process_sched_migrate_task_event(struct perf_tool *tool __used,
				 struct event_format *event,
				 struct perf_sample *sample,
				 struct machine *machine,
				 struct thread *thread __used)
{
	void *data = sample->raw_data;
	struct trace_migrate_task_event migrate_task_event;
	int err = 0;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample);

	return err;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct event_format *tp_format,
				  struct perf_sample *sample,
				  struct machine *machine,
				  struct thread *thread);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used,
						 union perf_event *event __used,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid);
	int err = 0;

	if (thread == NULL) {
		pr_debug("problem processing %s event, skipping it.\n",
			 perf_evsel__name(evsel));
		return -1;
	}

	evsel->hists.stats.total_period += sample->period;
	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		err = f(tool, evsel->tp_format, sample, machine, thread);
	}

	return err;
}
static struct perf_tool perf_sched = {
	.sample			= perf_sched__process_tracepoint_sample,
	.comm			= perf_event__process_comm,
	.lost			= perf_event__process_lost,
	.fork			= perf_event__process_task,
	.ordered_samples	= true,
};
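
/*
 * ordered_samples makes the session layer queue and time-sort samples
 * before they reach the handlers above; the latency state machine
 * assumes per-CPU time only moves forward, and anything that still
 * arrives out of order is counted in nr_unordered_timestamps.
 */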
static int read_events(bool destroy, struct perf_session **psession)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_process_fork", process_sched_fork_event, },
		{ "sched:sched_process_exit", process_sched_exit_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched);
	if (session == NULL) {
		pr_debug("No memory for session\n");
		return -1;
	}

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session, &perf_sched);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			return err;
		}

		nr_events      = session->hists.stats.nr_events[0];
		nr_lost_events = session->hists.stats.total_lost;
		nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
	}

	if (destroy)
		perf_session__delete(session);

	if (psession)
		*psession = session;

	return 0;

out_delete:
	perf_session__delete(session);
	return -1;
}
static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static int __cmd_lat(void)
{
	struct rb_node *next;
	struct perf_session *session;

	setup_pager();
	if (read_events(false, &session))
		return -1;
	sort_lat();

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");

	perf_session__delete(session);
	return 0;
}
static struct trace_sched_handler map_ops  = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static int __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	if (read_events(true, NULL))
		return -1;
	print_bad_events();
	return 0;
}
static int __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	if (read_events(true, NULL))
		return -1;

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();

	return 0;
}
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|script}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		   "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}
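
/*
 * Example: the default "avg, max, switch, runtime" order builds a
 * sort_list of the avg, max, switch and runtime dimensions, so
 * thread_lat_cmp() compares average latency first and falls through
 * to the later keys only on ties.
 */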
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch",
	"-e", "sched:sched_stat_wait",
	"-e", "sched:sched_stat_sleep",
	"-e", "sched:sched_stat_iowait",
	"-e", "sched:sched_stat_runtime",
	"-e", "sched:sched_process_exit",
	"-e", "sched:sched_process_fork",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_migrate_task",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
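
/*
 * Example (hypothetical command line): "perf sched record -- sleep 1"
 * appends "sleep 1" after the fixed record_args above, so the
 * synthesized argv is roughly:
 *
 *	record -a -R -f -m 1024 -c 1 -e sched:sched_switch ... sleep 1
 */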
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		return __cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		return __cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return __cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}