perf tool: handle errors in synthesized event functions
[firefly-linux-kernel-4.4.55.git] tools/perf/util/event.c
#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
        [0]                                     = "TOTAL",
        [PERF_RECORD_MMAP]                      = "MMAP",
        [PERF_RECORD_LOST]                      = "LOST",
        [PERF_RECORD_COMM]                      = "COMM",
        [PERF_RECORD_EXIT]                      = "EXIT",
        [PERF_RECORD_THROTTLE]                  = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE]                = "UNTHROTTLE",
        [PERF_RECORD_FORK]                      = "FORK",
        [PERF_RECORD_READ]                      = "READ",
        [PERF_RECORD_SAMPLE]                    = "SAMPLE",
        [PERF_RECORD_HEADER_ATTR]               = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]         = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA]       = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID]           = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND]            = "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
        if (id >= ARRAY_SIZE(perf_event__names))
                return "INVALID";
        if (!perf_event__names[id])
                return "UNKNOWN";
        return perf_event__names[id];
}

static struct perf_sample synth_sample = {
        .pid       = -1,
        .tid       = -1,
        .time      = -1,
        .stream_id = -1,
        .cpu       = -1,
        .period    = 1,
};

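/*
 * Read the comm and thread group id for @pid from /proc/<pid>/status.
 * Returns the tgid on success, 0 if the status file can't be opened
 * (e.g. the task already exited), or -1 if the file is malformed.
 */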
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
        char filename[PATH_MAX];
        char bf[BUFSIZ];
        FILE *fp;
        size_t size = 0;
        pid_t tgid = -1;

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        while (!comm[0] || (tgid < 0)) {
                if (fgets(bf, sizeof(bf), fp) == NULL) {
                        pr_warning("couldn't get COMM and tgid, malformed %s\n",
                                   filename);
                        break;
                }

                if (memcmp(bf, "Name:", 5) == 0) {
                        char *name = bf + 5;
                        while (*name && isspace(*name))
                                ++name;
                        size = strlen(name) - 1;
                        if (size >= len)
                                size = len - 1;
                        memcpy(comm, name, size);
                        comm[size] = '\0';

                } else if (memcmp(bf, "Tgid:", 5) == 0) {
                        char *tgids = bf + 5;
                        while (*tgids && isspace(*tgids))
                                ++tgids;
                        tgid = atoi(tgids);
                }
        }

        fclose(fp);

        return tgid;
}

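/*
 * Synthesize a PERF_RECORD_COMM event for @pid and deliver it via @process.
 * When @full is set, walk /proc/<pid>/task and emit one event per thread.
 * Returns the thread group id, or -1 if @process failed, so callers can
 * both reuse the tgid and detect delivery errors.
 */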
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
                                         int full,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        char filename[PATH_MAX];
        size_t size;
        DIR *tasks;
        struct dirent dirent, *next;
        pid_t tgid;

        memset(&event->comm, 0, sizeof(event->comm));

        tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
                                         sizeof(event->comm.comm));
        if (tgid < 0)
                goto out;

        event->comm.pid = tgid;
        event->comm.header.type = PERF_RECORD_COMM;

        size = strlen(event->comm.comm) + 1;
        size = ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
        if (!full) {
                event->comm.tid = pid;

                if (process(tool, event, &synth_sample, machine) != 0)
                        return -1;

                goto out;
        }

        snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

        tasks = opendir(filename);
        if (tasks == NULL) {
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        while (!readdir_r(tasks, &dirent, &next) && next) {
                char *end;
                pid = strtol(dirent.d_name, &end, 10);
                if (*end)
                        continue;

                /* already have tgid; just want to update the comm */
                (void) perf_event__get_comm_tgid(pid, event->comm.comm,
                                         sizeof(event->comm.comm));

                size = strlen(event->comm.comm) + 1;
                size = ALIGN(size, sizeof(u64));
                memset(event->comm.comm + size, 0, machine->id_hdr_size);
                event->comm.header.size = (sizeof(event->comm) -
                                          (sizeof(event->comm.comm) - size) +
                                          machine->id_hdr_size);

                event->comm.tid = pid;

                if (process(tool, event, &synth_sample, machine) != 0) {
                        tgid = -1;
                        break;
                }
        }

        closedir(tasks);
out:
        return tgid;
}

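/*
 * Parse /proc/<pid>/maps and synthesize one PERF_RECORD_MMAP event per
 * executable mapping. Returns 0 on success, -1 if the maps file can't be
 * opened or @process fails for one of the events.
 */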
static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                              union perf_event *event,
                                              pid_t pid, pid_t tgid,
                                              perf_event__handler_t process,
                                              struct machine *machine)
{
        char filename[PATH_MAX];
        FILE *fp;
        int rc = 0;

        snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;
        /*
         * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
         */
        event->header.misc = PERF_RECORD_MISC_USER;

        while (1) {
                char bf[BUFSIZ], *pbf = bf;
                int n;
                size_t size;
                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                n = hex2u64(pbf, &event->mmap.start);
                if (n < 0)
                        continue;
                pbf += n + 1;
                n = hex2u64(pbf, &event->mmap.len);
                if (n < 0)
                        continue;
                pbf += n + 3;
                if (*pbf == 'x') { /* vm_exec */
                        char anonstr[] = "//anon\n";
                        char *execname = strchr(bf, '/');

                        /* Catch VDSO */
                        if (execname == NULL)
                                execname = strstr(bf, "[vdso]");

                        /* Catch anonymous mmaps */
                        if ((execname == NULL) && !strstr(bf, "["))
                                execname = anonstr;

                        if (execname == NULL)
                                continue;

                        pbf += 3;
                        n = hex2u64(pbf, &event->mmap.pgoff);

                        size = strlen(execname);
                        execname[size - 1] = '\0'; /* Remove \n */
                        memcpy(event->mmap.filename, execname, size);
                        size = ALIGN(size, sizeof(u64));
                        event->mmap.len -= event->mmap.start;
                        event->mmap.header.size = (sizeof(event->mmap) -
                                                (sizeof(event->mmap.filename) - size));
                        memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                        event->mmap.header.size += machine->id_hdr_size;
                        event->mmap.pid = tgid;
                        event->mmap.tid = pid;

                        if (process(tool, event, &synth_sample, machine) != 0) {
                                rc = -1;
                                break;
                        }
                }
        }

        fclose(fp);
        return rc;
}

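/*
 * Synthesize PERF_RECORD_MMAP events for the kernel modules already known
 * to @machine. Returns 0 on success, -1 on allocation failure or on the
 * first event that @process fails to handle.
 */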
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        int rc = 0;
        struct rb_node *nd;
        struct map_groups *kmaps = &machine->kmaps;
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
             nd; nd = rb_next(nd)) {
                size_t size;
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (pos->dso->kernel)
                        continue;

                size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                        (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                event->mmap.header.size += machine->id_hdr_size;
                event->mmap.start = pos->start;
                event->mmap.len   = pos->end - pos->start;
                event->mmap.pid   = machine->pid;

                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;
                        break;
                }
        }

        free(event);
        return rc;
}

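/*
 * Synthesize the COMM event(s) for @pid and, only if that succeeds, the
 * matching MMAP events. Returns 0 on success, -1 on any failure.
 */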
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      pid_t pid, int full,
                                      perf_event__handler_t process,
                                      struct perf_tool *tool,
                                      struct machine *machine)
{
        pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
                                                 process, machine);
        if (tgid == -1)
                return -1;
        return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                  process, machine);
}

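/*
 * Synthesize COMM and MMAP events for every thread in @threads. If a
 * thread's group leader is not itself in the map, events for the leader
 * are synthesized as well. Returns 0 on success, -1 on allocation or
 * delivery failure.
 */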
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        union perf_event *comm_event, *mmap_event;
        int err = -1, thread, j;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               threads->map[thread], 0,
                                               process, tool, machine)) {
                        err = -1;
                        break;
                }

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != threads->map[thread]) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == threads->map[j]) {
                                        need_leader = false;
                                        break;
                                }
                        }

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine)) {
                                err = -1;
                                break;
                        }
                }
        }
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

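/*
 * Walk /proc and synthesize COMM and MMAP events for every thread on the
 * system. Returns 0 on success, -1 on allocation, /proc access or delivery
 * failure.
 *
 * A sketch of the intended use, with @tool and @machine set up elsewhere
 * and perf_event__process standing in for whatever handler the tool uses:
 *
 *	if (perf_event__synthesize_threads(tool, perf_event__process,
 *					   machine) < 0)
 *		pr_debug("couldn't synthesize thread events\n");
 */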
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        DIR *proc;
        struct dirent dirent, *next;
        union perf_event *comm_event, *mmap_event;
        int err = -1;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        proc = opendir("/proc");
        if (proc == NULL)
                goto out_free_mmap;

        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
                pid_t pid = strtol(dirent.d_name, &end, 10);

                if (*end) /* only interested in proper numerical dirents */
                        continue;

                if (__event__synthesize_thread(comm_event, mmap_event, pid, 1,
                                               process, tool, machine) != 0) {
                        err = -1;
                        goto out_closedir;
                }
        }

        err = 0;
out_closedir:
        closedir(proc);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

struct process_symbol_args {
        const char *name;
        u64        start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
                          u64 start)
{
        struct process_symbol_args *args = arg;

        /*
         * Must be a function or at least an alias, as in PARISC64, where
         * "_text" is an 'A' alias for the same address as "_stext".
         */
        if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
              type == 'A') || strcmp(name, args->name))
                return 0;

        args->start = start;
        return 1;
}

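/*
 * Synthesize the PERF_RECORD_MMAP event that describes the kernel text
 * mapping, using @symbol_name from kallsyms as the reference relocation
 * symbol. Returns what @process returns, -ENOENT if the symbol can't be
 * found, or -1 on allocation failure.
 *
 * One possible call, shown only as an example (handler and symbol name
 * will differ per tool):
 *
 *	err = perf_event__synthesize_kernel_mmap(tool, perf_event__process,
 *						 machine, "_text");
 */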
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       const char *symbol_name)
{
        size_t size;
        const char *filename, *mmap_name;
        char path[PATH_MAX];
        char name_buff[PATH_MAX];
        struct map *map;
        int err;
        /*
         * We should get this from /sys/kernel/sections/.text, but till that is
         * available use this, and after it is use this as a fallback for older
         * kernels.
         */
        struct process_symbol_args args = { .name = symbol_name, };
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for the kernel\n");
                return -1;
        }

        mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
                filename = "/proc/kallsyms";
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
                if (machine__is_default_guest(machine))
                        filename = (char *) symbol_conf.default_guest_kallsyms;
                else {
                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
                        filename = path;
                }
        }

        if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
                free(event);
                return -ENOENT;
        }

        map = machine->vmlinux_maps[MAP__FUNCTION];
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", mmap_name, symbol_name) + 1;
        size = ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                        (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
        event->mmap.pgoff = args.start;
        event->mmap.start = map->start;
        event->mmap.len   = map->end - event->mmap.start;
        event->mmap.pid   = machine->pid;

        err = process(tool, event, &synth_sample, machine);
        free(event);

        return err;
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
        return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __used,
                             union perf_event *event,
                             struct perf_sample *sample __used,
                             struct machine *machine)
{
        struct thread *thread = machine__findnew_thread(machine, event->comm.tid);

        if (dump_trace)
                perf_event__fprintf_comm(event, stdout);

        if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                return -1;
        }

        return 0;
}

int perf_event__process_lost(struct perf_tool *tool __used,
                             union perf_event *event,
                             struct perf_sample *sample __used,
                             struct machine *machine __used)
{
        dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
                    event->lost.id, event->lost.lost);
        return 0;
}

static void perf_event__set_kernel_mmap_len(union perf_event *event,
                                            struct map **maps)
{
        maps[MAP__FUNCTION]->start = event->mmap.start;
        maps[MAP__FUNCTION]->end   = event->mmap.start + event->mmap.len;
        /*
         * Be a bit paranoid here, some perf.data files came with
         * a zero-sized synthesized MMAP event for the kernel.
         */
        if (maps[MAP__FUNCTION]->end == 0)
                maps[MAP__FUNCTION]->end = ~0ULL;
}

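/*
 * Handle an MMAP event that covers kernel or module space: create the
 * corresponding kernel/module maps in @machine and record the ref reloc
 * symbol offset. Returns 0 on success, -1 on any problem.
 */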
static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
                                           union perf_event *event,
                                           struct machine *machine)
{
        struct map *map;
        char kmmap_prefix[PATH_MAX];
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;

        machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;

        is_kernel_mmap = memcmp(event->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix) - 1) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

                char short_module_name[1024];
                char *name, *dot;

                if (event->mmap.filename[0] == '/') {
                        name = strrchr(event->mmap.filename, '/');
                        if (name == NULL)
                                goto out_problem;

                        ++name; /* skip / */
                        dot = strrchr(name, '.');
                        if (dot == NULL)
                                goto out_problem;
                        snprintf(short_module_name, sizeof(short_module_name),
                                        "[%.*s]", (int)(dot - name), name);
                        strxfrchar(short_module_name, '-', '_');
                } else
                        strcpy(short_module_name, event->mmap.filename);

                map = machine__new_module(machine, event->mmap.start,
                                          event->mmap.filename);
                if (map == NULL)
                        goto out_problem;

                name = strdup(short_module_name);
                if (name == NULL)
                        goto out_problem;

                map->dso->short_name = name;
                map->dso->sname_alloc = 1;
                map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (event->mmap.filename +
                                strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
                 */
                struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
                                                     kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;

                kernel->kernel = kernel_type;
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;

                perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);

                /*
                 * Avoid using a zero address (kptr_restrict) for the ref reloc
                 * symbol. Effectively having zero here means that at record
                 * time /proc/sys/kernel/kptr_restrict was non zero.
                 */
                if (event->mmap.pgoff != 0) {
                        maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
                                                         event->mmap.pgoff);
                }

                if (machine__is_default_guest(machine)) {
                        /*
                         * preload dso of guest kernel and modules
                         */
                        dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
                                  NULL);
                }
        }
        return 0;
out_problem:
        return -1;
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
                       event->mmap.pid, event->mmap.tid, event->mmap.start,
                       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
}

int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample __used,
                             struct machine *machine)
{
        struct thread *thread;
        struct map *map;
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        int ret = 0;

        if (dump_trace)
                perf_event__fprintf_mmap(event, stdout);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = perf_event__process_kernel_mmap(tool, event, machine);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        thread = machine__findnew_thread(machine, event->mmap.pid);
        if (thread == NULL)
                goto out_problem;
        map = map__new(&machine->user_dsos, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
                        event->mmap.pid, event->mmap.filename,
                        MAP__FUNCTION);
        if (map == NULL)
                goto out_problem;

        thread__insert_map(thread, map);
        return 0;

out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
        return fprintf(fp, "(%d:%d):(%d:%d)\n",
                       event->fork.pid, event->fork.tid,
                       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_task(struct perf_tool *tool __used,
                             union perf_event *event,
                             struct perf_sample *sample __used,
                             struct machine *machine)
{
        struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
        struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);

        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (event->header.type == PERF_RECORD_EXIT) {
                machine__remove_thread(machine, thread);
                return 0;
        }

        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                return -1;
        }

        return 0;
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
                             perf_event__name(event->header.type));

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret += perf_event__fprintf_comm(event, fp);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                ret += perf_event__fprintf_task(event, fp);
                break;
        case PERF_RECORD_MMAP:
                ret += perf_event__fprintf_mmap(event, fp);
                break;
        default:
                ret += fprintf(fp, "\n");
        }

        return ret;
}

int perf_event__process(struct perf_tool *tool, union perf_event *event,
                        struct perf_sample *sample, struct machine *machine)
{
        switch (event->header.type) {
        case PERF_RECORD_COMM:
                perf_event__process_comm(tool, event, sample, machine);
                break;
        case PERF_RECORD_MMAP:
                perf_event__process_mmap(tool, event, sample, machine);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                perf_event__process_task(tool, event, sample, machine);
                break;
        case PERF_RECORD_LOST:
                perf_event__process_lost(tool, event, sample, machine);
        default:
                break;
        }

        return 0;
}

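/*
 * Resolve @addr to a map, choosing the kernel, user or guest map group
 * from @cpumode. On return al->map is NULL when nothing matched, and
 * al->filtered is set when the sample belongs to a context (host/guest)
 * this session is not interested in.
 */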
void thread__find_addr_map(struct thread *self,
                           struct machine *machine, u8 cpumode,
                           enum map_type type, u64 addr,
                           struct addr_location *al)
{
        struct map_groups *mg = &self->mg;

        al->thread = self;
        al->addr = addr;
        al->cpumode = cpumode;
        al->filtered = false;

        if (machine == NULL) {
                al->map = NULL;
                return;
        }

        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                mg = &machine->kmaps;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                mg = &machine->kmaps;
        } else {
                /*
                 * 'u' means guest os user space.
                 * TODO: We don't support guest user space. Might support it later.
                 */
                if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
                        al->level = 'u';
                else
                        al->level = 'H';
                al->map = NULL;

                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                        cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                        !perf_guest)
                        al->filtered = true;
                if ((cpumode == PERF_RECORD_MISC_USER ||
                        cpumode == PERF_RECORD_MISC_KERNEL) &&
                        !perf_host)
                        al->filtered = true;

                return;
        }
try_again:
        al->map = map_groups__find(mg, type, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
                 * address, try to look it up in the kernel dso, as it might be
                 * a vsyscall or vdso (which executes in user-mode).
                 *
                 * XXX This is nasty, we should have a symbol list in the
                 * "[vdso]" dso, but for now let's use the old trick of looking
                 * in the whole kernel symbol list.
                 */
                if ((long long)al->addr < 0 &&
                    cpumode == PERF_RECORD_MISC_USER &&
                    machine && mg != &machine->kmaps) {
                        mg = &machine->kmaps;
                        goto try_again;
                }
        } else
                al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
                                u8 cpumode, enum map_type type, u64 addr,
                                struct addr_location *al,
                                symbol_filter_t filter)
{
        thread__find_addr_map(thread, machine, cpumode, type, addr, al);
        if (al->map != NULL)
                al->sym = map__find_symbol(al->map, al->addr, filter);
        else
                al->sym = NULL;
}

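/*
 * Resolve a sample's ip to thread, map and symbol, applying the comm, dso
 * and symbol filter lists from symbol_conf. Filtered samples are not
 * dropped here, they are only marked with al->filtered = true.
 */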
int perf_event__preprocess_sample(const union perf_event *event,
                                  struct machine *machine,
                                  struct addr_location *al,
                                  struct perf_sample *sample,
                                  symbol_filter_t filter)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

        if (thread == NULL)
                return -1;

        if (symbol_conf.comm_list &&
            !strlist__has_entry(symbol_conf.comm_list, thread->comm))
                goto out_filtered;

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
        /*
         * Have we already created the kernel maps for this machine?
         *
         * This should have happened earlier, when we processed the kernel MMAP
         * events, but for older perf.data files there was no such thing, so do
         * it now.
         */
        if (cpumode == PERF_RECORD_MISC_KERNEL &&
            machine->vmlinux_maps[MAP__FUNCTION] == NULL)
                machine__create_kernel_maps(machine);

        thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
                              event->ip.ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
        al->sym = NULL;
        al->cpu = sample->cpu;

        if (al->map) {
                struct dso *dso = al->map->dso;

                if (symbol_conf.dso_list &&
                    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
                                                  dso->short_name) ||
                               (dso->short_name != dso->long_name &&
                                strlist__has_entry(symbol_conf.dso_list,
                                                   dso->long_name)))))
                        goto out_filtered;

                al->sym = map__find_symbol(al->map, al->addr, filter);
        }

        if (symbol_conf.sym_list && al->sym &&
            !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
                goto out_filtered;

        return 0;

out_filtered:
        al->filtered = true;
        return 0;
}