/* Human-readable names for each map type, indexed by enum map_type. */
11 const char *map_type__name[MAP__NR_TYPES] = {
12 [MAP__FUNCTION] = "Functions",
13 [MAP__VARIABLE] = "Variables",
/*
 * Return non-zero when @filename is the pseudo-path the kernel reports
 * for anonymous memory mappings ("//anon"), zero otherwise.
 */
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon");
}
/*
 * Return non-zero for special memory regions that have no backing DSO
 * on disk: the stack, the vDSO and the heap.
 */
static inline int is_no_dso_memory(const char *filename)
{
	static const char * const no_dso_names[] = {
		"[stack]", "[vdso]", "[heap]",
	};
	size_t i;

	for (i = 0; i < sizeof(no_dso_names) / sizeof(no_dso_names[0]); i++) {
		if (strcmp(filename, no_dso_names[i]) == 0)
			return 1;
	}
	return 0;
}
/*
 * Initialize an already-allocated map: install the default map_ip /
 * unmap_ip address-translation callbacks, clear the rb-tree linkage and
 * reset the referenced/erange_warned bookkeeping flags.
 * NOTE(review): the assignments of type/start/end/pgoff/dso are in
 * lines missing from this capture — confirm against the full file.
 */
28 void map__init(struct map *self, enum map_type type,
29 u64 start, u64 end, u64 pgoff, struct dso *dso)
36 self->map_ip = map__map_ip;
37 self->unmap_ip = map__unmap_ip;
38 RB_CLEAR_NODE(&self->rb_node);
40 self->referenced = false;
41 self->erange_warned = false;
/*
 * Allocate and initialize a map for an mmap event.  Anonymous mappings
 * are redirected to the per-pid JIT symbol file /tmp/perf-<pid>.map;
 * regions without a backing DSO ([stack]/[vdso]/[heap]) get identity ip
 * translation and are marked as loaded so later map__load() calls do
 * not emit a spurious warning.  Returns NULL on allocation failure
 * (error path not visible in this capture).
 */
44 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
45 u64 pgoff, u32 pid, char *filename,
48 struct map *self = malloc(sizeof(*self));
51 char newfilename[PATH_MAX];
55 anon = is_anon_memory(filename);
56 no_dso = is_no_dso_memory(filename);
/* anonymous maps: substitute the per-pid JIT map file as the "DSO" name */
59 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
60 filename = newfilename;
63 dso = __dsos__findnew(dsos__list, filename);
/* end is exclusive-ish here: computed as start + len */
67 map__init(self, type, start, start + len, pgoff, dso);
70 self->map_ip = self->unmap_ip = identity__map_ip;
73 * Set memory without DSO as loaded. All map__find_*
74 * functions still return NULL, and we avoid the
75 * unnecessary map__load warning.
78 dso__set_loaded(dso, self->type);
/* Free a map allocated by map__new()/map__clone(). Body not captured here. */
87 void map__delete(struct map *self)
/*
 * Shrink/adjust the map's start to the address of the first (lowest)
 * symbol in its DSO's symbol rb-tree for this map type.
 * NOTE(review): the nd != NULL guard appears to be in a missing line.
 */
92 void map__fixup_start(struct map *self)
94 struct rb_root *symbols = &self->dso->symbols[self->type];
95 struct rb_node *nd = rb_first(symbols);
97 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
98 self->start = sym->start;
/*
 * Mirror of map__fixup_start(): adjust the map's end to the end address
 * of the last (highest) symbol in the DSO's symbol rb-tree.
 */
102 void map__fixup_end(struct map *self)
104 struct rb_root *symbols = &self->dso->symbols[self->type];
105 struct rb_node *nd = rb_last(symbols);
107 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
108 self->end = sym->end;
/* Suffix the kernel appends to the name of a deleted, still-mapped file. */
112 #define DSO__DELETED "(deleted)"

/*
 * Lazily load the symbols of this map's DSO (no-op if already loaded).
 * On failure, warn with the build-id when available, otherwise with the
 * DSO path; if zero symbols were found, distinguish the prelink-updated
 * "(deleted)" case from a plain missing-debuginfo case.  Kernel maps are
 * additionally relocated via map__reloc_vmlinux() since their symtabs
 * are absolute rather than map-relative.
 * NOTE(review): the return statements and some control-flow lines are
 * missing from this capture.
 */
114 int map__load(struct map *self, symbol_filter_t filter)
116 const char *name = self->dso->long_name;
119 if (dso__loaded(self->dso, self->type))
122 nr = dso__load(self->dso, self, filter);
124 if (self->dso->has_build_id) {
125 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
127 build_id__sprintf(self->dso->build_id,
128 sizeof(self->dso->build_id),
130 pr_warning("%s with build id %s not found",
133 pr_warning("Failed to open %s", name);
135 pr_warning(", continuing without symbols\n");
137 } else if (nr == 0) {
138 const size_t len = strlen(name);
/* position of the " (deleted)" suffix if present at the end of name */
139 const size_t real_len = len - sizeof(DSO__DELETED);
141 if (len > sizeof(DSO__DELETED) &&
142 strcmp(name + real_len + 1, DSO__DELETED) == 0) {
143 pr_warning("%.*s was updated (is prelink enabled?). "
144 "Restart the long running apps that use it!\n",
145 (int)real_len, name);
147 pr_warning("no symbols found in %s, maybe install "
148 "a debug package?\n", name);
154 * Only applies to the kernel, as its symtabs aren't relative like the
/* kernel symtabs hold absolute addresses, so relocate the map */
157 if (self->dso->kernel)
158 map__reloc_vmlinux(self);
/*
 * Resolve @addr (already map-relative) to a symbol in this map's DSO,
 * loading symbols on demand.  Returns NULL if loading fails (early
 * return is in a line missing from this capture).
 */
163 struct symbol *map__find_symbol(struct map *self, u64 addr,
164 symbol_filter_t filter)
166 if (map__load(self, filter) < 0)
169 return dso__find_symbol(self->dso, self->type, addr);
/*
 * Look up a symbol by name in this map's DSO, loading symbols on demand
 * and lazily building the by-name sort order on first use.
 */
172 struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
173 symbol_filter_t filter)
175 if (map__load(self, filter) < 0)
/* by-name lookups need the secondary, name-sorted tree: build it once */
178 if (!dso__sorted_by_name(self->dso, self->type))
179 dso__sort_by_name(self->dso, self->type);
181 return dso__find_symbol_by_name(self->dso, self->type, name);
/*
 * Shallow-copy a map (the clone shares the DSO pointer with the
 * original).  NULL-check of malloc and the return are in lines missing
 * from this capture.
 */
184 struct map *map__clone(struct map *self)
186 struct map *map = malloc(sizeof(*self));
191 memcpy(map, self, sizeof(*self));
/*
 * Return non-zero when maps @l and @r overlap.  Only the l->start >
 * r->start branch is visible here; the symmetric case is in lines
 * missing from this capture.
 */
196 int map__overlap(struct map *l, struct map *r)
198 if (l->start > r->start) {
204 if (l->end > r->start)
/* Print one map as "start-end pgoff dsoname"; returns bytes written. */
210 size_t map__fprintf(struct map *self, FILE *fp)
212 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
213 self->start, self->end, self->pgoff, self->dso->name);
/*
 * Print the map's DSO name to @fp, preferring the long (full path) name
 * when symbol_conf.show_kernel_path is set; falls back to "[unknown]"
 * when no name is available.  Returns bytes written.
 */
216 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
220 if (map && map->dso && (map->dso->name || map->dso->long_name)) {
221 if (symbol_conf.show_kernel_path && map->dso->long_name)
222 dsoname = map->dso->long_name;
223 else if (map->dso->name)
224 dsoname = map->dso->name;
226 dsoname = "[unknown]";
228 return fprintf(fp, "%s", dsoname);
232 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
233 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
/*
 * Convert a perf-internal rip to the address objdump expects.  The
 * else-branch and the return are in lines missing from this capture.
 */
235 u64 map__rip_2objdump(struct map *map, u64 rip)
237 u64 addr = map->dso->adjust_symbols ?
238 map->unmap_ip(map, rip) : /* RIP -> IP */
/*
 * Inverse of map__rip_2objdump(): convert an objdump-reported address
 * back to a perf-internal ip.  The ET_EXEC branch and the return are in
 * lines missing from this capture.
 */
243 u64 map__objdump_2ip(struct map *map, u64 addr)
245 u64 ip = map->dso->adjust_symbols ?
247 map->unmap_ip(map, addr); /* RIP -> IP */
/* Initialize every per-type map rb-tree and removed-maps list. */
251 void map_groups__init(struct map_groups *mg)
254 for (i = 0; i < MAP__NR_TYPES; ++i) {
255 mg->maps[i] = RB_ROOT;
256 INIT_LIST_HEAD(&mg->removed_maps[i]);
/*
 * Erase and free every map in an rb-tree.  The successor is fetched
 * before rb_erase() so iteration survives the removal; the while loop
 * and the map__delete(pos) call are in lines missing from this capture.
 */
261 static void maps__delete(struct rb_root *maps)
263 struct rb_node *next = rb_first(maps);
266 struct map *pos = rb_entry(next, struct map, rb_node);
268 next = rb_next(&pos->rb_node);
269 rb_erase(&pos->rb_node, maps);
/*
 * Unlink and free every map on a removed-maps list (safe iteration, as
 * entries are deleted while walking).
 */
274 static void maps__delete_removed(struct list_head *maps)
278 list_for_each_entry_safe(pos, n, maps, node) {
279 list_del(&pos->node);
/* Tear down a map_groups: free live and removed maps for every type. */
284 void map_groups__exit(struct map_groups *mg)
288 for (i = 0; i < MAP__NR_TYPES; ++i) {
289 maps__delete(&mg->maps[i]);
290 maps__delete_removed(&mg->removed_maps[i]);
/*
 * Empty every live map rb-tree, moving each map onto the corresponding
 * removed_maps list rather than freeing it, because hist_entry et al.
 * may still hold references.
 */
294 void map_groups__flush(struct map_groups *mg)
298 for (type = 0; type < MAP__NR_TYPES; type++) {
299 struct rb_root *root = &mg->maps[type];
300 struct rb_node *next = rb_first(root);
303 struct map *pos = rb_entry(next, struct map, rb_node);
/* advance before erasing so the walk stays valid */
304 next = rb_next(&pos->rb_node);
305 rb_erase(&pos->rb_node, root);
307 * We may have references to this map, for
308 * instance in some hist_entry instances, so
309 * just move them to a separate list.
311 list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
/*
 * Resolve an absolute @addr: find the containing map of @type, then
 * translate addr to a map-relative ip and look the symbol up in its DSO.
 */
316 struct symbol *map_groups__find_symbol(struct map_groups *mg,
317 enum map_type type, u64 addr,
319 symbol_filter_t filter)
321 struct map *map = map_groups__find(mg, type, addr);
326 return map__find_symbol(map, map->map_ip(map, addr), filter);
/*
 * Search every map of the given type for a symbol named @name; linear
 * walk over the maps, by-name lookup inside each DSO.  Return/found
 * handling is in lines missing from this capture.
 */
332 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
336 symbol_filter_t filter)
340 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
341 struct map *pos = rb_entry(nd, struct map, rb_node);
342 struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
/*
 * Print all live maps of one type under a "<Type>:" header; with
 * verbosity, also dump each map's DSO.  Returns total bytes written.
 */
354 size_t __map_groups__fprintf_maps(struct map_groups *mg,
355 enum map_type type, int verbose, FILE *fp)
357 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
360 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
361 struct map *pos = rb_entry(nd, struct map, rb_node);
362 printed += fprintf(fp, "Map:");
363 printed += map__fprintf(pos, fp);
/* verbose-only DSO dump (the guard is in a line missing here) */
365 printed += dso__fprintf(pos->dso, type, fp);
366 printed += fprintf(fp, "--\n");
/* Print live maps of all types; sums the per-type byte counts. */
373 size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
375 size_t printed = 0, i;
376 for (i = 0; i < MAP__NR_TYPES; ++i)
377 printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
/*
 * Like __map_groups__fprintf_maps() but for the removed-maps list of
 * one type.  Returns bytes written.
 */
381 static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
383 int verbose, FILE *fp)
388 list_for_each_entry(pos, &mg->removed_maps[type], node) {
389 printed += fprintf(fp, "Map:");
390 printed += map__fprintf(pos, fp);
392 printed += dso__fprintf(pos->dso, type, fp);
393 printed += fprintf(fp, "--\n");
/* Print removed maps of all types; sums the per-type byte counts. */
399 static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
400 int verbose, FILE *fp)
402 size_t printed = 0, i;
403 for (i = 0; i < MAP__NR_TYPES; ++i)
404 printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
/* Full dump: live maps, then a "Removed maps:" section. */
408 size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
410 size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
411 printed += fprintf(fp, "Removed maps:\n");
412 return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
/*
 * Remove every existing map that overlaps the incoming @map, cloning
 * the non-overlapped head/tail portions back into the tree and parking
 * the displaced map on removed_maps (it may still be referenced).
 * Error-unwinding and some control flow are in lines missing from this
 * capture.
 */
415 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
416 int verbose, FILE *fp)
418 struct rb_root *root = &mg->maps[map->type];
419 struct rb_node *next = rb_first(root);
423 struct map *pos = rb_entry(next, struct map, rb_node);
424 next = rb_next(&pos->rb_node);
426 if (!map__overlap(pos, map))
430 fputs("overlapping maps:\n", fp);
431 map__fprintf(map, fp);
432 map__fprintf(pos, fp);
435 rb_erase(&pos->rb_node, root);
437 * Now check if we need to create new maps for areas not
438 * overlapped by the new map:
/* keep the head of pos that ends just before the new map starts */
440 if (map->start > pos->start) {
441 struct map *before = map__clone(pos);
443 if (before == NULL) {
448 before->end = map->start - 1;
449 map_groups__insert(mg, before);
451 map__fprintf(before, fp);
/* keep the tail of pos that starts just after the new map ends */
454 if (map->end < pos->end) {
455 struct map *after = map__clone(pos);
462 after->start = map->end + 1;
463 map_groups__insert(mg, after);
465 map__fprintf(after, fp);
469 * If we have references, just move them to a separate list.
472 list_add_tail(&pos->node, &mg->removed_maps[map->type]);
484 * XXX This should not really _copy_ the maps, but refcount them.
/*
 * Copy every map of @type from @parent into @mg (used when a child
 * process inherits its parent's address space).  The NULL check on
 * map__clone() is in a line missing from this capture.
 */
486 int map_groups__clone(struct map_groups *mg,
487 struct map_groups *parent, enum map_type type)
490 for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
491 struct map *map = rb_entry(nd, struct map, rb_node);
492 struct map *new = map__clone(map);
495 map_groups__insert(mg, new);
500 static u64 map__reloc_map_ip(struct map *map, u64 ip)
502 return ip + (s64)map->pgoff;
505 static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
507 return ip - (s64)map->pgoff;
/*
 * Compute the vmlinux relocation delta from the reference relocation
 * symbol (unrelocated vs. runtime address) and switch this map to the
 * reloc-aware ip translation callbacks.  Storing the delta (presumably
 * into pgoff, used by map__reloc_*_ip) happens in a line missing from
 * this capture.
 */
510 void map__reloc_vmlinux(struct map *self)
512 struct kmap *kmap = map__kmap(self);
/* nothing to do without a valid reference symbol */
515 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
518 reloc = (kmap->ref_reloc_sym->unrelocated_addr -
519 kmap->ref_reloc_sym->addr);
524 self->map_ip = map__reloc_map_ip;
525 self->unmap_ip = map__reloc_unmap_ip;
/*
 * Insert a map into the rb-tree keyed by start address.  The descent
 * loop and start-address comparison are in lines missing from this
 * capture.
 */
529 void maps__insert(struct rb_root *maps, struct map *map)
531 struct rb_node **p = &maps->rb_node;
532 struct rb_node *parent = NULL;
533 const u64 ip = map->start;
538 m = rb_entry(parent, struct map, rb_node);
545 rb_link_node(&map->rb_node, parent, p);
546 rb_insert_color(&map->rb_node, maps);
/* Unlink a map from its rb-tree; does not free it. */
549 void maps__remove(struct rb_root *self, struct map *map)
551 rb_erase(&map->rb_node, self);
/*
 * Find the map whose [start, end] range contains @ip via rb-tree
 * descent; returns NULL when no map covers it (loop body and returns
 * are partly in lines missing from this capture).
 */
554 struct map *maps__find(struct rb_root *maps, u64 ip)
556 struct rb_node **p = &maps->rb_node;
557 struct rb_node *parent = NULL;
562 m = rb_entry(parent, struct map, rb_node);
565 else if (ip > m->end)
/*
 * Initialize a machine: kernel map groups, DSO lists, thread tree and
 * a duplicated root_dir.  Returns -ENOMEM if the strdup fails, 0 on
 * success.
 */
574 int machine__init(struct machine *self, const char *root_dir, pid_t pid)
576 map_groups__init(&self->kmaps);
577 RB_CLEAR_NODE(&self->rb_node);
578 INIT_LIST_HEAD(&self->user_dsos);
579 INIT_LIST_HEAD(&self->kernel_dsos);
581 self->threads = RB_ROOT;
582 INIT_LIST_HEAD(&self->dead_threads);
583 self->last_match = NULL;
/* back-pointer so kmaps can reach its owning machine */
585 self->kmaps.machine = self;
587 self->root_dir = strdup(root_dir);
588 return self->root_dir == NULL ? -ENOMEM : 0;
/* Unlink and free every DSO on the list (safe iteration). */
591 static void dsos__delete(struct list_head *self)
595 list_for_each_entry_safe(pos, n, self, node) {
596 list_del(&pos->node);
/*
 * Release a machine's resources: kernel maps, both DSO lists and the
 * duplicated root_dir (NULLed after free to avoid dangling use).
 */
601 void machine__exit(struct machine *self)
603 map_groups__exit(&self->kmaps);
604 dsos__delete(&self->user_dsos);
605 dsos__delete(&self->kernel_dsos);
606 free(self->root_dir);
607 self->root_dir = NULL;
/* Exit and free a heap-allocated machine. Body not captured here. */
610 void machine__delete(struct machine *self)
/*
 * Allocate, initialize and insert a new machine into the pid-keyed
 * rb-tree.  Returns the machine, or NULL on allocation/init failure
 * (malloc NULL-check, cleanup and descent comparison are in lines
 * missing from this capture).
 */
616 struct machine *machines__add(struct rb_root *self, pid_t pid,
617 const char *root_dir)
619 struct rb_node **p = &self->rb_node;
620 struct rb_node *parent = NULL;
621 struct machine *pos, *machine = malloc(sizeof(*machine));
626 if (machine__init(machine, root_dir, pid) != 0) {
633 pos = rb_entry(parent, struct machine, rb_node);
640 rb_link_node(&machine->rb_node, parent, p);
641 rb_insert_color(&machine->rb_node, self);
/*
 * Find the machine with the given pid in the rb-tree.  On an exact
 * match it is recorded as default_machine (visible assignment at 663);
 * otherwise the last default candidate — or NULL — is returned.
 * NOTE(review): part of the loop and the exact-match branch are in
 * lines missing from this capture.
 */
646 struct machine *machines__find(struct rb_root *self, pid_t pid)
648 struct rb_node **p = &self->rb_node;
649 struct rb_node *parent = NULL;
650 struct machine *machine;
651 struct machine *default_machine = NULL;
655 machine = rb_entry(parent, struct machine, rb_node);
656 if (pid < machine->pid)
658 else if (pid > machine->pid)
663 default_machine = machine;
666 return default_machine;
/*
 * Find the machine for @pid, creating it if absent.  For real guest
 * pids (not host / default-guest sentinels) with a configured
 * guestmount, the guest's root filesystem path is used as root_dir.
 * NOTE(review): sprintf() into a fixed buffer here — the guestmount
 * string is config-controlled, but snprintf would be safer; confirm
 * `path` is PATH_MAX-sized in the full file.
 */
669 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
672 const char *root_dir = "";
673 struct machine *machine = machines__find(self, pid);
675 if (machine && (machine->pid == pid))
678 if ((pid != HOST_KERNEL_ID) &&
679 (pid != DEFAULT_GUEST_KERNEL_ID) &&
680 (symbol_conf.guestmount)) {
681 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
682 if (access(path, R_OK)) {
683 pr_err("Can't access file %s\n", path);
690 machine = machines__add(self, pid, root_dir);
/* Invoke @process on every machine in the tree, in pid order. */
696 void machines__process(struct rb_root *self, machine__process_t process, void *data)
700 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
701 struct machine *pos = rb_entry(nd, struct machine, rb_node);
/*
 * Format the synthetic kernel mmap name into @bf: "[kernel.kallsyms]"
 * for the host, "[guest.kernel.kallsyms]" for the default guest, or
 * "[guest.kernel.kallsyms.<pid>]" for a specific guest.  Returns bf
 * (return statement is in a line missing from this capture).
 */
706 char *machine__mmap_name(struct machine *self, char *bf, size_t size)
708 if (machine__is_host(self))
709 snprintf(bf, size, "[%s]", "kernel.kallsyms")
710 else if (machine__is_default_guest(self))
711 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
713 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);