1 #include <linux/list.h>
2 #include <linux/compiler.h>
3 #include <linux/string.h>
4 #include "ordered-events.h"
/*
 * Debug print helpers for this file: pr_N() emits at verbosity level @n
 * under the debug_ordered_events category; pr() is the common level-1
 * shorthand with the file's pr_fmt() prefix applied.
 */
10 #define pr_N(n, fmt, ...) \
11 eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)
13 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
/*
 * Insert @new into oe->events keeping the list sorted by timestamp.
 * The search starts from oe->last (the previously queued event) rather
 * than from either list end, on the expectation that successive events
 * arrive with nearby timestamps, and walks forward or backward from
 * there depending on how @new compares to it.
 *
 * NOTE(review): this view of the file is elided — the list-pointer
 * advance statements, early returns and closing braces between the
 * visible lines are not shown here; confirm against the full source.
 */
15 static void queue_event(struct ordered_events *oe, struct ordered_event *new)
17 struct ordered_event *last = oe->last;
18 u64 timestamp = new->timestamp;
24 pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);
/* Empty queue (or no last event): @new is trivially the head and carries
 * the maximum timestamp seen so far. */
27 list_add(&new->list, &oe->events);
28 oe->max_timestamp = timestamp;
33 * last event might point to some random place in the list as it's
34 * the last queued event. We expect that the new event is close to
/* New event sorts at or after 'last': walk towards the list tail. */
37 if (last->timestamp <= timestamp) {
38 while (last->timestamp <= timestamp) {
/* Ran off the end of the list: @new is the newest event overall. */
40 if (p == &oe->events) {
41 list_add_tail(&new->list, &oe->events);
/* Newest event also defines the new maximum timestamp. */
42 oe->max_timestamp = timestamp;
45 last = list_entry(p, struct ordered_event, list);
/* Insert after the last entry whose timestamp is <= @timestamp. */
47 list_add_tail(&new->list, &last->list);
/* New event sorts before 'last': walk towards the list head. */
49 while (last->timestamp > timestamp) {
/* Ran off the front of the list: @new is the oldest event. */
51 if (p == &oe->events) {
52 list_add(&new->list, &oe->events);
55 last = list_entry(p, struct ordered_event, list);
/* Insert after the first entry whose timestamp is <= @timestamp. */
57 list_add(&new->list, &last->list);
/*
 * Duplicate @event into freshly allocated memory, but only while the
 * running allocation total stays under the configured cap
 * (oe->max_alloc_size). On success the duplicate's size is charged to
 * oe->cur_alloc_size; when the cap is reached, NULL is returned
 * (new_event stays NULL).
 */
61 static union perf_event *__dup_event(struct ordered_events *oe,
62 union perf_event *event)
64 union perf_event *new_event = NULL;
66 if (oe->cur_alloc_size < oe->max_alloc_size) {
67 new_event = memdup(event, event->header.size);
/* NOTE(review): accounting presumably guarded by a memdup success
 * check in the elided line above — confirm in full source. */
69 oe->cur_alloc_size += event->header.size;
/*
 * Return a private copy of @event when copy_on_queue is set (the caller
 * then owns the copy via free_dup_event()); otherwise return @event
 * itself, i.e. the queue borrows the caller's buffer.
 */
75 static union perf_event *dup_event(struct ordered_events *oe,
76 union perf_event *event)
78 return oe->copy_on_queue ? __dup_event(oe, event) : event;
/*
 * Release an event obtained from dup_event(). Only events that were
 * actually duplicated (copy_on_queue) are accounted and freed; borrowed
 * events are left untouched. Uncharges the event's size from
 * oe->cur_alloc_size, mirroring __dup_event().
 */
81 static void free_dup_event(struct ordered_events *oe, union perf_event *event)
83 if (oe->copy_on_queue) {
84 oe->cur_alloc_size -= event->header.size;
/* Number of ordered_event slots carved from one 64KB buffer chunk. */
89 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
/*
 * Get an ordered_event container for @event, trying in order:
 *   1. the free-list cache of previously delivered events,
 *   2. the next unused slot of the current buffer chunk,
 *   3. a brand new MAX_SAMPLE_BUFFER-slot chunk (if under the
 *      allocation cap); its first slot is sacrificed to link the chunk
 *      into oe->to_free for later teardown.
 * The event payload itself is (optionally) duplicated via dup_event()
 * before a container is picked.
 */
90 static struct ordered_event *alloc_event(struct ordered_events *oe,
91 union perf_event *event)
93 struct list_head *cache = &oe->cache;
94 struct ordered_event *new = NULL;
95 union perf_event *new_event;
97 new_event = dup_event(oe, event);
/* Fast path: reuse a recycled container from the cache. */
101 if (!list_empty(cache)) {
102 new = list_entry(cache->next, struct ordered_event, list);
103 list_del(&new->list);
/* Next slot in the current chunk; the chunk is dropped once full. */
104 } else if (oe->buffer) {
105 new = oe->buffer + oe->buffer_idx;
106 if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
/* Allocate a fresh chunk, respecting the global size cap. */
108 } else if (oe->cur_alloc_size < oe->max_alloc_size) {
109 size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
111 oe->buffer = malloc(size);
/* malloc failure path: give back the duplicated payload. */
113 free_dup_event(oe, new_event);
117 pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
118 oe->cur_alloc_size, size, oe->max_alloc_size);
/* Charge the whole chunk, not per-event, to cur_alloc_size. */
120 oe->cur_alloc_size += size;
121 list_add(&oe->buffer->list, &oe->to_free);
123 /* First entry is abused to maintain the to_free list. */
125 new = oe->buffer + 1;
/* Cap reached and nothing cached: allocation fails. */
127 pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
130 new->event = new_event;
/*
 * Allocate a container for @event, stamp it with @timestamp and insert
 * it into the time-ordered queue. Returns the queued container
 * (NULL on allocation failure — the failure return is in an elided
 * line of this view).
 */
134 struct ordered_event *
135 ordered_events__new(struct ordered_events *oe, u64 timestamp,
136 union perf_event *event)
138 struct ordered_event *new;
140 new = alloc_event(oe, event);
142 new->timestamp = timestamp;
143 queue_event(oe, new);
/*
 * Retire a delivered event: recycle its container onto the cache
 * free-list and release the (possibly duplicated) payload.
 */
149 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
151 list_move(&event->list, &oe->cache);
153 free_dup_event(oe, event->event);
/*
 * Deliver every queued event whose timestamp is <= oe->next_flush, in
 * timestamp order: parse the sample, hand it to machines__deliver_event(),
 * then recycle the container and advance oe->last_flush. A progress bar
 * is shown only for a final flush (limit == ULLONG_MAX, i.e. "flush
 * everything"). Afterwards oe->last is repositioned so the next
 * queue_event() search starts near the right place.
 *
 * NOTE(review): elided lines hide the error-return paths for the parse
 * and deliver failures — confirm their handling in the full source.
 */
156 static int __ordered_events__flush(struct ordered_events *oe)
158 struct list_head *head = &oe->events;
159 struct ordered_event *tmp, *iter;
160 struct perf_sample sample;
161 u64 limit = oe->next_flush;
162 u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
/* ULLONG_MAX limit means "flush all" — worth a progress bar. */
163 bool show_progress = limit == ULLONG_MAX;
164 struct ui_progress prog;
171 ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
173 list_for_each_entry_safe(iter, tmp, head, list) {
/* Queue is sorted, so the first too-new event ends the flush. */
177 if (iter->timestamp > limit)
180 ret = perf_evlist__parse_sample(oe->evlist, iter->event, &sample);
182 pr_err("Can't parse sample, err = %d\n", ret);
184 ret = machines__deliver_event(oe->machines, oe->evlist, iter->event,
185 &sample, oe->tool, iter->file_offset);
190 ordered_events__delete(oe, iter);
191 oe->last_flush = iter->timestamp;
194 ui_progress__update(&prog, 1);
/* Fix up oe->last: NULL when empty; otherwise, if the old last event
 * was itself flushed (last_ts <= limit), point at the new tail. */
197 if (list_empty(head))
199 else if (last_ts <= limit)
200 oe->last = list_entry(head->prev, struct ordered_event, list);
/*
 * Public flush entry point: compute oe->next_flush according to @how,
 * then run __ordered_events__flush().
 *
 *   OE_FLUSH__FINAL - flush everything (next_flush = ULLONG_MAX).
 *   OE_FLUSH__HALF  - (case label elided in this view) flush up to the
 *                     midpoint between the oldest and newest queued
 *                     timestamps.
 *   OE_FLUSH__ROUND - next_flush was set by the previous round; after
 *                     flushing, advance it to max_timestamp.
 *
 * Returns 0 when there is nothing queued, otherwise the result of the
 * flush. str[] names the flush type for the debug trace lines.
 */
205 int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
207 static const char * const str[] = {
/* Nothing queued: trivially done. */
215 if (oe->nr_events == 0)
219 case OE_FLUSH__FINAL:
220 oe->next_flush = ULLONG_MAX;
225 struct ordered_event *first, *last;
226 struct list_head *head = &oe->events;
228 first = list_entry(head->next, struct ordered_event, list);
231 /* Warn if we are called before any event got allocated. */
232 if (WARN_ONCE(!last || list_empty(head), "empty queue"))
/* Flush threshold = halfway between oldest and newest timestamps. */
235 oe->next_flush = first->timestamp;
236 oe->next_flush += (last->timestamp - first->timestamp) / 2;
240 case OE_FLUSH__ROUND:
246 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
247 str[how], oe->nr_events);
248 pr_oe_time(oe->max_timestamp, "max_timestamp\n");
250 err = __ordered_events__flush(oe);
/* A ROUND flush primes next_flush for the following round. */
253 if (how == OE_FLUSH__ROUND)
254 oe->next_flush = oe->max_timestamp;
256 oe->last_flush_type = how;
259 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
260 str[how], oe->nr_events);
261 pr_oe_time(oe->last_flush, "last_flush\n");
/*
 * Initialize an ordered_events queue: empty the event/cache/to_free
 * lists, set an effectively unlimited allocation cap ((u64)-1) with
 * zero current usage, and record the delivery context (machines;
 * evlist/tool assignments are in elided lines of this view).
 */
266 void ordered_events__init(struct ordered_events *oe, struct machines *machines,
267 struct perf_evlist *evlist, struct perf_tool *tool)
269 INIT_LIST_HEAD(&oe->events);
270 INIT_LIST_HEAD(&oe->cache);
271 INIT_LIST_HEAD(&oe->to_free);
272 oe->max_alloc_size = (u64) -1;
273 oe->cur_alloc_size = 0;
275 oe->machines = machines;
/*
 * Tear down the queue: walk oe->to_free (whose entries are the
 * sacrificial first slots of each allocated buffer chunk, see
 * alloc_event()), unlink each one and release its duplicated payload.
 * NOTE(review): the free() of the chunk itself is in an elided line —
 * confirm in the full source.
 */
279 void ordered_events__free(struct ordered_events *oe)
281 while (!list_empty(&oe->to_free)) {
282 struct ordered_event *event;
284 event = list_entry(oe->to_free.next, struct ordered_event, list);
285 list_del(&event->list);
286 free_dup_event(oe, event->event);