static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

struct callchain_param callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5,
	.order		= ORDER_CALLEE,
	.key		= CCKEY_FUNCTION
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
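
/*
 * Worked example of the width arithmetic above: on a 64-bit build,
 * unresolved_col_width = BITS_PER_LONG / 4 = 16, so an unresolved symbol
 * column is sized 16 + 4 + 2 = 22 characters: a 16-digit hex address, the
 * '[x] ' privilege level prefix and the '0x' prefix.  With -v, a resolved
 * symbol additionally reserves 16 + 2 + 3 characters for the raw address
 * and the symtab origin marker.
 */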
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
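
/*
 * Decay example: a period of 1000 becomes 875 after one call, 765 after
 * two, and eventually reaches 0 under repeated integer 7/8 scaling;
 * hists__decay_entry() below reports that point, so a live (top-style)
 * histogram can prune entries that stopped receiving samples.
 */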
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}
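
/*
 * A minimal usage sketch, assuming a live-mode caller (e.g. perf top's
 * periodic refresh) with a struct hists *hists in scope:
 *
 *	hists__decay_entries(hists, false, false);
 *
 * zap_user/zap_kernel force-drop user or kernel entries regardless of
 * their remaining period.
 */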
/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
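
/*
 * Note on the pattern above: the hist_entry is first built on the stack as
 * a template and only copied to the heap by hist_entry__new() once
 * add_hist_entry() has established that no entry with the same sort keys
 * already exists in the rbtree.
 */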
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct sorting
	 * from hists__collapse_resort(), which is based solely on periods.
	 * We want sorting to be done on nr_events * weight, and this is
	 * indirectly achieved by passing period=weight here and via the
	 * he_stat__add_period() function.
	 */
	he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
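
/*
 * Example of the period=weight trick above: two mem samples with weights
 * 40 and 60 accumulate into one entry with nr_events = 2 and period = 100,
 * so sorting by period orders entries by total memory access cost rather
 * than by sample count.
 */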
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem
	 * info was either already freed in add_hist_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
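
/*
 * With the pseudo period of 1 used above, an entry's percentage ends up
 * being (branches matching its from/to pair) / (total branch stack entries
 * processed), i.e. a share of captured branches, not of sampled events.
 */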
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only once, preventing entries from exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
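
/*
 * The he_cache lookups above keep recursion from inflating cumulative
 * overhead: for a callchain like main -> foo -> foo -> bar, the second
 * "foo" frame matches an earlier he_cache[] entry via hist_entry__cmp()
 * and is skipped, so foo's cumulative period is accounted only once and
 * no entry can exceed 100%.
 */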
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
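
/*
 * A minimal sketch of how a tool picks one of these ops tables per sample
 * (hypothetical local variables; see builtin-report.c for the real wiring):
 *
 *	struct hist_entry_iter iter = {
 *		.ops = &hist_iter_normal,
 *	};
 *
 *	if (sample->branch_stack)
 *		iter.ops = &hist_iter_branch;
 *	else if (mem_mode)
 *		iter.ops = &hist_iter_mem;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 */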
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
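
/*
 * Typical call site, sketched (assumes a resolved al/evsel/sample as in a
 * tool's sample handler; error handling elided):
 *
 *	struct hist_entry_iter iter = {
 *		.ops = &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   PERF_MAX_STACK_DEPTH, NULL);
 */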
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
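
/*
 * entries_in_array[] acts as a two-slot double buffer: the slot returned
 * here is drained by hists__collapse_resort() while new samples keep
 * landing in the other slot, so top-style tools can collapse concurrently
 * with sample insertion, serialized only by this brief lock.
 */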
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
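
/*
 * Processing pipeline recap: __hists__add_entry() accumulates samples into
 * hists->entries_in, hists__collapse_resort() merges entries that compare
 * equal under the collapse keys into entries_collapsed, and
 * hists__output_resort() below re-sorts the result into hists->entries
 * for display.
 */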
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}
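
/*
 * min_callchain_hits example: with total_period = 20000 and the default
 * callchain_param.min_percent = 0.5, chains contributing fewer than 100
 * period units are pruned by callchain_param.sort() in
 * __hists__insert_output_entry().
 */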
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
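
/*
 * The "hist.percentage" knob handled above is settable from the perf
 * config file (e.g. ~/.perfconfig):
 *
 *	[hist]
 *		percentage = relative
 *
 * "relative" makes percentages be computed against the filtered total
 * (see hists__total_period()), while "absolute" keeps the unfiltered
 * total as the base.
 */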