perf tools: Fix comm for processes with named threads
[firefly-linux-kernel-4.4.55.git] tools/perf/util/event.c
#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period    = 1,
};

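/*
 * Synthesize a PERF_RECORD_COMM event for @pid by parsing the "Name:" and
 * "Tgid:" lines of /proc/<pid>/status.  When @full is set, one event is
 * emitted per thread listed under /proc/<pid>/task; otherwise a single event
 * for @pid is emitted.  Returns the thread group id, or 0 if the task exited
 * while we were looking at it.
 */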
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 int full, perf_event__handler_t process,
					 struct machine *machine)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&event->comm, 0, sizeof(event->comm));

	while (!event->comm.comm[0] || !event->comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and tgid, malformed %s\n", filename);
			goto out;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(event->comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = event->comm.pid = atoi(tgids);
		}
	}

	event->comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		process(tool, event, &synth_sample, machine);
		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		event->comm.tid = pid;

		process(tool, event, &synth_sample, machine);
	}

	closedir(tasks);
out:
	fclose(fp);

	return tgid;
}

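/*
 * Synthesize PERF_RECORD_MMAP events for the executable mappings of @pid by
 * parsing /proc/<pid>/maps.  Anonymous executable mappings are reported as
 * "//anon" so that samples hitting them can still be attributed to a map.
 */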
static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
					      union perf_event *event,
					      pid_t pid, pid_t tgid,
					      perf_event__handler_t process,
					      struct machine *machine)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &event->mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &event->mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char anonstr[] = "//anon\n";
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			/* Catch anonymous mmaps */
			if ((execname == NULL) && !strstr(bf, "["))
				execname = anonstr;

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &event->mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(event->mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			event->mmap.len -= event->mmap.start;
			event->mmap.header.size = (sizeof(event->mmap) -
						(sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
			event->mmap.header.size += machine->id_hdr_size;
			event->mmap.pid = tgid;
			event->mmap.tid = pid;

			process(tool, event, &synth_sample, machine);
		}
	}

	fclose(fp);
	return 0;
}

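/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module already known to
 * @machine, so that module symbols can be resolved when the perf.data file
 * is processed.
 */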
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(tool, event, &synth_sample, machine);
	}

	free(event);
	return 0;
}

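/*
 * Synthesize the COMM event(s) for one pid and then the MMAP events for the
 * address space of its thread group.
 */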
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine)
{
	pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
						 process, machine);
	if (tgid == -1)
		return -1;
	return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						  process, machine);
}

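/*
 * Synthesize COMM and MMAP events for every thread in @threads.  If the
 * thread group leader of an entry is not itself in the thread_map, events
 * are synthesized for the leader as well.
 */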
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *comm_event, *mmap_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread], 0,
					       process, tool, machine)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event,
						       mmap_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine)) {
				err = -1;
				break;
			}
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

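/*
 * Synthesize COMM and MMAP events for every process currently listed in
 * /proc, i.e. for the whole system.
 */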
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	DIR *proc;
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		__event__synthesize_thread(comm_event, mmap_event, pid, 1,
					   process, tool, machine);
	}

	closedir(proc);
	err = 0;
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start, u64 end __used)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

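/*
 * Synthesize the PERF_RECORD_MMAP event describing the kernel text mapping:
 * @symbol_name (e.g. "_text" or "_stext") is looked up in kallsyms and its
 * address recorded in pgoff as the reference relocation symbol.
 */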
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine,
				       const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

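/*
 * Handle an incoming PERF_RECORD_COMM: look up (or create) the thread and
 * record its new comm string.
 */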
int perf_event__process_comm(struct perf_tool *tool __used,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int perf_event__process_lost(struct perf_tool *tool __used,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine __used)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

static void perf_event__set_kernel_mmap_len(union perf_event *event,
					    struct map **maps)
{
	maps[MAP__FUNCTION]->start = event->mmap.start;
	maps[MAP__FUNCTION]->end   = event->mmap.start + event->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data files came with
	 * a zero-sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}

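/*
 * Handle kernel-space MMAP events: module mappings get a new module DSO with
 * a "[shortname]" style short name, while the main kernel mapping updates the
 * vmlinux maps and the kallsyms ref reloc symbol.
 */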
static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
					   union perf_event *event,
					   struct machine *machine)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
					"[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
}

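/*
 * Handle an incoming PERF_RECORD_MMAP: kernel and guest-kernel mappings are
 * routed to perf_event__process_kernel_mmap(), user mappings are inserted
 * into the owning thread's map groups.
 */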
int perf_event__process_mmap(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine)
{
	struct thread *thread;
	struct map *map;
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = perf_event__process_kernel_mmap(tool, event, machine);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			event->mmap.pid, event->mmap.filename,
			MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

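/*
 * Handle PERF_RECORD_FORK and PERF_RECORD_EXIT: on exit the thread is removed
 * from the machine, on fork the child thread inherits from its parent.
 */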
int perf_event__process_task(struct perf_tool *tool __used,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (event->header.type == PERF_RECORD_EXIT) {
		machine__remove_thread(machine, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

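/*
 * Convenience dispatcher that routes an event to the per-type handlers above;
 * event types without a handler here are silently ignored.
 */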
int perf_event__process(struct perf_tool *tool, union perf_event *event,
			struct perf_sample *sample, struct machine *machine)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		perf_event__process_comm(tool, event, sample, machine);
		break;
	case PERF_RECORD_MMAP:
		perf_event__process_mmap(tool, event, sample, machine);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		perf_event__process_task(tool, event, sample, machine);
		break;
	case PERF_RECORD_LOST:
		perf_event__process_lost(tool, event, sample, machine);
	default:
		break;
	}

	return 0;
}

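/*
 * Resolve @addr to the map it falls into, picking the kernel or user map
 * groups of @machine according to @cpumode.  Negative user-space addresses
 * are retried against the kernel maps to catch vsyscall/vdso hits.
 */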
void thread__find_addr_map(struct thread *self,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support it later.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

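/*
 * Resolve the thread, map and symbol for a sample and apply the comm/dso/sym
 * filter lists from symbol_conf, marking the addr_location as filtered when
 * a list excludes it.
 */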
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample,
				  symbol_filter_t filter)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      event->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name)))))
			goto out_filtered;

		al->sym = map__find_symbol(al->map, al->addr, filter);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}