perf ordered_events: Shorten function signatures
[firefly-linux-kernel-4.4.55.git] / tools / perf / util / ordered-events.c
1 #include <linux/list.h>
2 #include <linux/compiler.h>
3 #include <linux/string.h>
4 #include "ordered-events.h"
5 #include "evlist.h"
6 #include "session.h"
7 #include "asm/bug.h"
8 #include "debug.h"
9
10 #define pr_N(n, fmt, ...) \
11         eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)
12
13 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
14
/*
 * Insert @new into oe->events, keeping the list sorted by timestamp.
 *
 * The search starts from oe->last (the previously queued event) rather
 * than from the list head, on the assumption that consecutive events
 * arrive with nearby timestamps, making the expected walk short.
 * Also updates oe->nr_events, oe->last and (when @new becomes the
 * newest event) oe->max_timestamp.
 */
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	/* Empty queue: @new is trivially both first and newest. */
	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		/* Walk forward until we pass an event newer than @new. */
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				/* Reached the tail: @new is the newest event seen. */
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert @new just before the first newer event. */
		list_add_tail(&new->list, &last->list);
	} else {
		/* Walk backward until we pass an event not newer than @new. */
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				/* Reached the head: @new is the oldest event. */
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert @new just after the last older-or-equal event. */
		list_add(&new->list, &last->list);
	}
}
60
61 static union perf_event *__dup_event(struct ordered_events *oe,
62                                      union perf_event *event)
63 {
64         union perf_event *new_event = NULL;
65
66         if (oe->cur_alloc_size < oe->max_alloc_size) {
67                 new_event = memdup(event, event->header.size);
68                 if (new_event)
69                         oe->cur_alloc_size += event->header.size;
70         }
71
72         return new_event;
73 }
74
75 static union perf_event *dup_event(struct ordered_events *oe,
76                                    union perf_event *event)
77 {
78         return oe->copy_on_queue ? __dup_event(oe, event) : event;
79 }
80
81 static void free_dup_event(struct ordered_events *oe, union perf_event *event)
82 {
83         if (oe->copy_on_queue) {
84                 oe->cur_alloc_size -= event->header.size;
85                 free(event);
86         }
87 }
88
89 #define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct ordered_event))
90 static struct ordered_event *alloc_event(struct ordered_events *oe,
91                                          union perf_event *event)
92 {
93         struct list_head *cache = &oe->cache;
94         struct ordered_event *new = NULL;
95         union perf_event *new_event;
96
97         new_event = dup_event(oe, event);
98         if (!new_event)
99                 return NULL;
100
101         if (!list_empty(cache)) {
102                 new = list_entry(cache->next, struct ordered_event, list);
103                 list_del(&new->list);
104         } else if (oe->buffer) {
105                 new = oe->buffer + oe->buffer_idx;
106                 if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
107                         oe->buffer = NULL;
108         } else if (oe->cur_alloc_size < oe->max_alloc_size) {
109                 size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
110
111                 oe->buffer = malloc(size);
112                 if (!oe->buffer) {
113                         free_dup_event(oe, new_event);
114                         return NULL;
115                 }
116
117                 pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
118                    oe->cur_alloc_size, size, oe->max_alloc_size);
119
120                 oe->cur_alloc_size += size;
121                 list_add(&oe->buffer->list, &oe->to_free);
122
123                 /* First entry is abused to maintain the to_free list. */
124                 oe->buffer_idx = 2;
125                 new = oe->buffer + 1;
126         } else {
127                 pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
128         }
129
130         new->event = new_event;
131         return new;
132 }
133
134 struct ordered_event *
135 ordered_events__new(struct ordered_events *oe, u64 timestamp,
136                     union perf_event *event)
137 {
138         struct ordered_event *new;
139
140         new = alloc_event(oe, event);
141         if (new) {
142                 new->timestamp = timestamp;
143                 queue_event(oe, new);
144         }
145
146         return new;
147 }
148
149 void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
150 {
151         list_move(&event->list, &oe->cache);
152         oe->nr_events--;
153         free_dup_event(oe, event->event);
154 }
155
/*
 * Deliver all queued events with timestamp <= oe->next_flush, in
 * order, removing each as it is delivered. Afterwards, repair
 * oe->last if the node it pointed to was flushed. Returns 0 on
 * success or the first delivery error.
 */
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	/* ULLONG_MAX means a final flush of everything — worth a progress bar. */
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	/* A zero limit flushes nothing. */
	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		/* Bail out early if the session was interrupted. */
		if (session_done())
			return 0;

		/* List is sorted, so the first too-new event ends the flush. */
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(oe->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = machines__deliver_event(oe->machines, oe->evlist, iter->event,
						      &sample, oe->tool, iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	/*
	 * If the node oe->last pointed at was flushed (its timestamp was
	 * within the limit), repoint it at the current list tail; if the
	 * list emptied, clear it.
	 */
	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}
204
205 int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
206 {
207         static const char * const str[] = {
208                 "NONE",
209                 "FINAL",
210                 "ROUND",
211                 "HALF ",
212         };
213         int err;
214
215         if (oe->nr_events == 0)
216                 return 0;
217
218         switch (how) {
219         case OE_FLUSH__FINAL:
220                 oe->next_flush = ULLONG_MAX;
221                 break;
222
223         case OE_FLUSH__HALF:
224         {
225                 struct ordered_event *first, *last;
226                 struct list_head *head = &oe->events;
227
228                 first = list_entry(head->next, struct ordered_event, list);
229                 last = oe->last;
230
231                 /* Warn if we are called before any event got allocated. */
232                 if (WARN_ONCE(!last || list_empty(head), "empty queue"))
233                         return 0;
234
235                 oe->next_flush  = first->timestamp;
236                 oe->next_flush += (last->timestamp - first->timestamp) / 2;
237                 break;
238         }
239
240         case OE_FLUSH__ROUND:
241         case OE_FLUSH__NONE:
242         default:
243                 break;
244         };
245
246         pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
247                    str[how], oe->nr_events);
248         pr_oe_time(oe->max_timestamp, "max_timestamp\n");
249
250         err = __ordered_events__flush(oe);
251
252         if (!err) {
253                 if (how == OE_FLUSH__ROUND)
254                         oe->next_flush = oe->max_timestamp;
255
256                 oe->last_flush_type = how;
257         }
258
259         pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
260                    str[how], oe->nr_events);
261         pr_oe_time(oe->last_flush, "last_flush\n");
262
263         return err;
264 }
265
266 void ordered_events__init(struct ordered_events *oe, struct machines *machines,
267                           struct perf_evlist *evlist, struct perf_tool *tool)
268 {
269         INIT_LIST_HEAD(&oe->events);
270         INIT_LIST_HEAD(&oe->cache);
271         INIT_LIST_HEAD(&oe->to_free);
272         oe->max_alloc_size = (u64) -1;
273         oe->cur_alloc_size = 0;
274         oe->evlist         = evlist;
275         oe->machines       = machines;
276         oe->tool           = tool;
277 }
278
279 void ordered_events__free(struct ordered_events *oe)
280 {
281         while (!list_empty(&oe->to_free)) {
282                 struct ordered_event *event;
283
284                 event = list_entry(oe->to_free.next, struct ordered_event, list);
285                 list_del(&event->list);
286                 free_dup_event(oe, event->event);
287                 free(event);
288         }
289 }