1 /*
2  * Copyright(C) 2016 Linaro Limited. All rights reserved.
3  * Author: Tor Jeremiassen <tor.jeremiassen@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/types.h>
20 #include <linux/bitops.h>
21 #include <linux/log2.h>
22
23 #include "perf.h"
24 #include "thread_map.h"
25 #include "thread.h"
26 #include "thread-stack.h"
27 #include "callchain.h"
28 #include "auxtrace.h"
29 #include "evlist.h"
30 #include "machine.h"
31 #include "util.h"
32 #include "color.h"
33 #include "cs-etm.h"
34 #include "cs-etm-decoder/cs-etm-decoder.h"
35 #include "debug.h"
36
37 #include <stdlib.h>
38
39 #define KiB(x) ((x) * 1024)
40 #define MiB(x) ((x) * 1024 * 1024)
41 #define MAX_TIMESTAMP (~0ULL)
42
43 struct cs_etm_auxtrace {
44         struct auxtrace         auxtrace;
45         struct auxtrace_queues  queues;
46         struct auxtrace_heap    heap;
47         u64                    **metadata;
48         u32                     auxtrace_type;
49         struct perf_session    *session;
50         struct machine         *machine;
51         struct perf_evsel      *switch_evsel;
52         struct thread          *unknown_thread;
53         uint32_t                num_cpu;
54         bool                    timeless_decoding;
55         bool                    sampling_mode;
56         bool                    snapshot_mode;
57         bool                    data_queued;
58         bool                    sync_switch;
59         bool                    synth_needs_swap;
60         int                     have_sched_switch;
61
62         bool                    sample_instructions;
63         u64                     instructions_sample_type;
64         u64                     instructions_sample_period;
65         u64                     instructions_id;
66         struct itrace_synth_opts synth_opts;
67         unsigned                pmu_type;
68 };
69
70 struct cs_etm_queue {
71         struct cs_etm_auxtrace *etm;
72         unsigned                queue_nr;
73         struct auxtrace_buffer *buffer;
74         const struct           cs_etm_state *state;
75         struct ip_callchain    *chain;
76         union perf_event       *event_buf;
77         bool                    on_heap;
78         bool                    step_through_buffers;
79         bool                    use_buffer_pid_tid;
80         pid_t                   pid, tid;
81         int                     cpu;
82         struct thread          *thread;
83         u64                     time;
84         u64                     timestamp;
85         bool                    stop;
86         bool                    have_sample;
87         struct cs_etm_decoder  *decoder;
88         u64                     offset;
89         bool                    eot;
90         bool                    kernel_mapped;
91 };
92
93 static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq);
94 static int cs_etm__update_queues(struct cs_etm_auxtrace *);
95 static int cs_etm__process_queues(struct cs_etm_auxtrace *, u64);
96 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *, pid_t, u64);
97 static uint32_t cs_etm__mem_access(struct cs_etm_queue *, uint64_t, size_t, uint8_t *);
98
99 static void cs_etm__packet_dump(const char *pkt_string)
100 {
101         const char *color = PERF_COLOR_BLUE;
102
103         color_fprintf(stdout,color, "  %s\n", pkt_string);
104         fflush(stdout);
105 }
106
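/*
 * Dump the raw trace contained in an auxtrace buffer: build a throw-away
 * decoder in CS_ETM_OPERATION_PRINT mode from the per-CPU ETMv4 metadata
 * and feed it the whole buffer, printing each packet through
 * cs_etm__packet_dump().
 */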
107 static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
108                               struct auxtrace_buffer *buffer)
109 {
110         const char *color = PERF_COLOR_BLUE;
111         struct cs_etm_decoder_params d_params;
112         struct cs_etm_trace_params *t_params;
113         struct cs_etm_decoder *decoder;
114         size_t buffer_used = 0;
115         size_t i;
116
117         fprintf(stdout,"\n");
118         color_fprintf(stdout, color,
119                      ". ... CoreSight ETM Trace data: size %zu bytes\n",
120                      buffer->size);
121
122         t_params = zalloc(sizeof(struct cs_etm_trace_params) * etm->num_cpu);
            if (!t_params)
                    return;
123         for (i = 0; i < etm->num_cpu; ++i) {
124                 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
125                 t_params[i].reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
126                 t_params[i].reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
127                 t_params[i].reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
128                 t_params[i].reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
129                 t_params[i].reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR];
130                 t_params[i].reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
132         }
133         d_params.packet_printer = cs_etm__packet_dump;
134         d_params.operation = CS_ETM_OPERATION_PRINT;
135         d_params.formatted = true;
136         d_params.fsyncs = false;
137         d_params.hsyncs = false;
138         d_params.frame_aligned = true;
139
140         decoder = cs_etm_decoder__new(etm->num_cpu,&d_params, t_params);
141
142         zfree(&t_params);
143
144         if (decoder == NULL)
145                 return;
146
147         do {
148                 size_t consumed;
149                 cs_etm_decoder__process_data_block(decoder, buffer->offset,
                                &(((uint8_t *)buffer->data)[buffer_used]),
                                buffer->size - buffer_used, &consumed);
150                 buffer_used += consumed;
151         } while (buffer_used < buffer->size);
152         cs_etm_decoder__free(decoder);
153 }
154                               
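/*
 * Flush callback invoked at the end of the session: make sure newly added
 * queues have been set up, then drain them either per-thread (timeless
 * decoding) or in timestamp order.
 */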
155 static int cs_etm__flush_events(struct perf_session *session,
                                    struct perf_tool *tool)
{
156         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
157                                                    struct cs_etm_auxtrace,
158                                                    auxtrace);
159
160         int ret;
161
162         if (dump_trace)
163                 return 0;
164
165         if (!tool->ordered_events)
166                 return -EINVAL;
167
168         ret = cs_etm__update_queues(etm);
169
170         if (ret < 0)
171                 return ret;
172
173         if (etm->timeless_decoding)
174                 return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
175
176         return cs_etm__process_queues(etm, MAX_TIMESTAMP);
177 }
178
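/*
 * Refresh the pid/tid/cpu cached in a decode queue, looking the current tid
 * up in the machine state when the queue is not tied to a fixed tid or when
 * sched_switch information is available.
 */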
179 static void  cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
180                                     struct auxtrace_queue *queue)
181 {
182         struct cs_etm_queue *etmq = queue->priv;
183
184         if ((queue->tid == -1) || (etm->have_sched_switch)) {
185                 etmq->tid = machine__get_current_tid(etm->machine, etmq->cpu);
186                 thread__zput(etmq->thread);
187         }
188
189         if ((!etmq->thread) && (etmq->tid != -1)) {
190                 etmq->thread = machine__find_thread(etm->machine,-1,etmq->tid);
191         }
192
193         if (etmq->thread) {
194                 etmq->pid = etmq->thread->pid_;
195                 if (queue->cpu == -1) {
196                         etmq->cpu = etmq->thread->cpu;
197                 }
198         }
199 }
200
201 static void cs_etm__free_queue(void *priv)
202 {
203         struct cs_etm_queue *etmq = priv;
204
205         if (!etmq)
206                 return;
207
208         thread__zput(etmq->thread);
209         cs_etm_decoder__free(etmq->decoder);
210         zfree(&etmq->event_buf);
211         zfree(&etmq->chain);
212         free(etmq);
213 }
214
215 static void cs_etm__free_events(struct perf_session *session)
216 {
217         struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
218                                                    struct cs_etm_auxtrace,
219                                                    auxtrace);
220
221         struct auxtrace_queues *queues = &(aux->queues);
222
223         unsigned i;
224
225         for (i = 0; i < queues->nr_queues; ++i) {
226                 cs_etm__free_queue(queues->queue_array[i].priv);
227                 queues->queue_array[i].priv = NULL;
228         }
229
230         auxtrace_queues__free(queues);
231
232 }
233
234 static void cs_etm__free(struct perf_session *session)
235 {
236
237         size_t i;
238         struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
239                                                    struct cs_etm_auxtrace,
240                                                    auxtrace);
241         auxtrace_heap__free(&aux->heap);
242         cs_etm__free_events(session);
243         session->auxtrace = NULL;
244         thread__zput(aux->unknown_thread);
245         for (i = 0; i < aux->num_cpu; ++i) {
246                 zfree(&aux->metadata[i]);
247         }
248         zfree(&aux->metadata);
249         free(aux);
250 }
251
252 static void cs_etm__use_buffer_pid_tid(struct cs_etm_queue *etmq,
253                                       struct auxtrace_queue *queue,
254                                       struct auxtrace_buffer *buffer)
255 {
256         if ((queue->cpu == -1) && (buffer->cpu != -1)) {
257                 etmq->cpu = buffer->cpu;
258         }
259
260         etmq->pid = buffer->pid;
261         etmq->tid = buffer->tid;
262
263         thread__zput(etmq->thread);
264
265         if (etmq->tid != -1) {
266                 if (etmq->pid != -1) {
267                         etmq->thread = machine__findnew_thread(etmq->etm->machine,
268                                                                etmq->pid,
269                                                                etmq->tid);
270                 } else {
271                         etmq->thread = machine__findnew_thread(etmq->etm->machine,
272                                                                -1,
273                                                                etmq->tid);
274                 }
275         }
276 }
277
278
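/*
 * Hand the next auxtrace buffer of this queue to the decoder: drop the
 * previously used buffer, read the new one from the perf.data file if
 * needed and describe it in 'buff'.  buff->len == 0 signals that the
 * queue is exhausted.
 */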
279 static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
280 {
281         struct auxtrace_buffer *aux_buffer = etmq->buffer;
282         struct auxtrace_buffer *old_buffer = aux_buffer;
283         struct auxtrace_queue *queue;
284
285         if (etmq->stop) {
286                 buff->len = 0;
287                 return 0;
288         }
289
290         queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
291
292         aux_buffer = auxtrace_buffer__next(queue,aux_buffer);
293
294         if (!aux_buffer) {
295                 if (old_buffer) {
296                         auxtrace_buffer__drop_data(old_buffer);
297                 }
298                 buff->len = 0;
299                 return 0;
300         }
301
302         etmq->buffer = aux_buffer;
303
304         if (!aux_buffer->data) {
305                 int fd = perf_data_file__fd(etmq->etm->session->file);
306
307                 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
308                 if (!aux_buffer->data)
309                         return -ENOMEM;
310         }
311
312         if (old_buffer)
313                 auxtrace_buffer__drop_data(old_buffer);
314
315         if (aux_buffer->use_data) {
316                 buff->offset = aux_buffer->offset;
317                 buff->len = aux_buffer->use_size;
318                 buff->buf = aux_buffer->use_data;
319         } else {
320                 buff->offset = aux_buffer->offset;
321                 buff->len = aux_buffer->size;
322                 buff->buf = aux_buffer->data;
323         }
329         etmq->stop = true;
330
331         buff->ref_timestamp = aux_buffer->reference;
332
333         if (etmq->use_buffer_pid_tid && 
334             ((etmq->pid != aux_buffer->pid) || 
335              (etmq->tid != aux_buffer->tid))) {
336                 cs_etm__use_buffer_pid_tid(etmq,queue,aux_buffer);
337         }
338
339         if (etmq->step_through_buffers)
340                 etmq->stop = true;
341
342         if (buff->len == 0) 
343                 return cs_etm__get_trace(buff,etmq);
344
345         return 0;
346 }
347
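/*
 * Allocate and initialise a decode queue: optional callchain storage, a
 * scratch buffer for synthesized events and a decoder configured with the
 * per-CPU ETMv4 trace parameters in CS_ETM_OPERATION_DECODE mode.
 */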
348 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
349                                                unsigned int queue_nr)
350 {
351         struct cs_etm_decoder_params d_params;
352         struct cs_etm_trace_params   *t_params;
353         struct cs_etm_queue *etmq;
354         size_t i;
355
356         etmq = zalloc(sizeof(struct cs_etm_queue));
357         if (!etmq)
358                 return NULL;
359
360         if (etm->synth_opts.callchain) {
361                 size_t sz = sizeof(struct ip_callchain);
362
363                 sz += etm->synth_opts.callchain_sz * sizeof(u64);
364                 etmq->chain = zalloc(sz);
365                 if (!etmq->chain)
366                         goto out_free;
367         } else {
368                 etmq->chain = NULL;
369         }
370
371         etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
372         if (!etmq->event_buf)
373                 goto out_free;
374
375         etmq->etm = etm;
376         etmq->queue_nr = queue_nr;
377         etmq->pid = -1;
378         etmq->tid = -1;
379         etmq->cpu = -1;
380         etmq->stop = false;
381         etmq->kernel_mapped = false;
382
383         t_params = zalloc(sizeof(struct cs_etm_trace_params) * etm->num_cpu);
            if (!t_params)
                    goto out_free;
384
385         for (i = 0; i < etm->num_cpu; ++i) {
386                 t_params[i].reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
387                 t_params[i].reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
388                 t_params[i].reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
389                 t_params[i].reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
390                 t_params[i].reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR];
391                 t_params[i].reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
392                 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
393         }
394         d_params.packet_printer = cs_etm__packet_dump;
395         d_params.operation = CS_ETM_OPERATION_DECODE;    
396         d_params.formatted = true;
397         d_params.fsyncs = false;
398         d_params.hsyncs = false;
399         d_params.frame_aligned = true;
400         d_params.data = etmq;
401
402         etmq->decoder = cs_etm_decoder__new(etm->num_cpu,&d_params,t_params);
403
404
405         zfree(&t_params);
406
407         if (!etmq->decoder)
408                 goto out_free;
409
410         etmq->offset = 0;
411         etmq->eot = false;
412
413         return etmq;
414
415 out_free:
416         zfree(&etmq->event_buf);
417         zfree(&etmq->chain);
418         free(etmq);
419         return NULL;
420 }
421
422 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm, 
423                               struct auxtrace_queue *queue,
424                               unsigned int queue_nr)
425 {
426         struct cs_etm_queue *etmq = queue->priv;
427
428         if (list_empty(&(queue->head))) 
429                 return 0;
430
431         if (etmq == NULL) {
432                 etmq = cs_etm__alloc_queue(etm,queue_nr);
433
434                 if (etmq == NULL) {
435                         return -ENOMEM;
436                 }
437
438                 queue->priv = etmq;
439
440                 if (queue->cpu != -1) {
441                         etmq->cpu = queue->cpu;
442                 }
443
444                 etmq->tid = queue->tid;
445
446                 if (etm->sampling_mode) {
447                         if (etm->timeless_decoding)
448                                 etmq->step_through_buffers = true;
449                         if (etm->timeless_decoding || !etm->have_sched_switch)
450                                 etmq->use_buffer_pid_tid = true;
451                 }
452         }
453         
454         if (!etmq->on_heap && 
455             (!etm->sync_switch)) {
456                 const struct cs_etm_state *state;
457                 int ret = 0;
458
459                 if (etm->timeless_decoding)
460                         return ret;
461
462                 //cs_etm__log("queue %u getting timestamp\n",queue_nr);
463                 //cs_etm__log("queue %u decoding cpu %d pid %d tid %d\n",
464                            //queue_nr, etmq->cpu, etmq->pid, etmq->tid);
465                 (void) state;
466                 return ret;
467                 /*
468                 while (1) {
469                         state = cs_etm_decoder__decode(etmq->decoder);
470                         if (state->err) {
471                                 if (state->err == CS_ETM_ERR_NODATA) {
472                                         //cs_etm__log("queue %u has no timestamp\n",
473                                                    //queue_nr);
474                                         return 0;
475                                 }
476                                 continue;
477                         }
478                         if (state->timestamp)
479                                 break;
480                 }
481
482                 etmq->timestamp = state->timestamp;
483                 //cs_etm__log("queue %u timestamp 0x%"PRIx64 "\n",
484                            //queue_nr, etmq->timestamp);
485                 etmq->state = state;
486                 etmq->have_sample = true;
487                 //cs_etm__sample_flags(etmq);
488                 ret = auxtrace_heap__add(&etm->heap, queue_nr, etmq->timestamp);
489                 if (ret)
490                         return ret;
491                 etmq->on_heap = true;
492                 */
493         }
494         
495         return 0;
496 }
497
498
499 static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
500 {
501         unsigned int i;
502         int ret;
503
504         for (i = 0; i < etm->queues.nr_queues; i++) {
505                 ret = cs_etm__setup_queue(etm, &(etm->queues.queue_array[i]),i);
506                 if (ret)
507                         return ret;
508         }
509         return 0;
510 }
511
512 #if 0
513 struct cs_etm_cache_entry {
514         struct auxtrace_cache_entry     entry;
515         uint64_t                        icount;
516         uint64_t                        bcount;
517 };
518
519 static size_t cs_etm__cache_divisor(void)
520 {
521         static size_t d = 64;
522
523         return d;
524 }
525
526 static size_t cs_etm__cache_size(struct dso *dso,
527                                 struct machine *machine)
528 {
529         off_t size;
530
531         size = dso__data_size(dso,machine);
532         size /= cs_etm__cache_divisor();
533
534         if (size < 1000) 
535                 return 10;
536
537         if (size > (1 << 21)) 
538                 return 21;
539
540         return 32 - __builtin_clz(size);
541 }
542
543 static struct auxtrace_cache *cs_etm__cache(struct dso *dso,
544                                            struct machine *machine)
545 {
546         struct auxtrace_cache *c;
547         size_t bits;
548
549         if (dso->auxtrace_cache)
550                 return dso->auxtrace_cache;
551
552         bits = cs_etm__cache_size(dso,machine);
553
554         c = auxtrace_cache__new(bits, sizeof(struct cs_etm_cache_entry), 200);
555
556         dso->auxtrace_cache = c;
557
558         return c;
559 }
560
561 static int cs_etm__cache_add(struct dso *dso, struct machine *machine,
562                             uint64_t offset, uint64_t icount, uint64_t bcount)
563 {
564         struct auxtrace_cache *c = cs_etm__cache(dso, machine);
565         struct cs_etm_cache_entry *e;
566         int err;
567
568         if (!c)
569                 return -ENOMEM;
570
571         e = auxtrace_cache__alloc_entry(c);
572         if (!e)
573                 return -ENOMEM;
574
575         e->icount = icount;
576         e->bcount = bcount;
577
578         err = auxtrace_cache__add(c, offset, &e->entry);
579
580         if (err)
581                 auxtrace_cache__free_entry(c, e);
582
583         return err;
584 }
585
586 static struct cs_etm_cache_entry *cs_etm__cache_lookup(struct dso *dso,
587                                                       struct machine *machine,
588                                                       uint64_t offset)
589 {
590         struct auxtrace_cache *c = cs_etm__cache(dso, machine);
591
592         if (!c)
593                 return NULL;
594
595         return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
596 }
597 #endif
598
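/*
 * Synthesize a PERF_RECORD_SAMPLE for an executed address range.  The
 * period is the range length divided by four, which assumes fixed-size
 * 4-byte instructions.
 */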
599 static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
600                                            struct cs_etm_packet *packet)
601 {
602         int ret = 0;
603         struct cs_etm_auxtrace *etm = etmq->etm;
604         union perf_event *event = etmq->event_buf;
605         struct perf_sample sample = {.ip = 0,};
606         uint64_t start_addr = packet->start_addr;
607         uint64_t end_addr = packet->end_addr;
608
609         event->sample.header.type = PERF_RECORD_SAMPLE;
610         event->sample.header.misc = PERF_RECORD_MISC_USER;
611         event->sample.header.size = sizeof(struct perf_event_header);
612
613
614         sample.ip = start_addr;
615         sample.pid = etmq->pid;
616         sample.tid = etmq->tid;
617         sample.addr = end_addr;
618         sample.id = etmq->etm->instructions_id;
619         sample.stream_id = etmq->etm->instructions_id;
620         sample.period = (end_addr - start_addr) >> 2; 
621         sample.cpu = etmq->cpu;
622         sample.flags = 0; // etmq->flags;
623         sample.insn_len = 1; // etmq->insn_len;
624
625         //etmq->last_insn_cnt = etmq->state->tot_insn_cnt;
626
627 #if 0
628         {
629                 struct   addr_location al;
630                 uint64_t offset;
631                 struct   thread *thread;
632                 struct   machine *machine = etmq->etm->machine;
633                 uint8_t  cpumode;
634                 struct   cs_etm_cache_entry *e;
635                 uint8_t  buf[256];
636                 size_t   bufsz;
637
638                 thread = etmq->thread;
639
640                 if (!thread) {
641                         thread = etmq->etm->unknown_thread;
642                 }
643
644                 if (start_addr > 0xffffffc000000000UL) {
645                         cpumode = PERF_RECORD_MISC_KERNEL;
646                 } else {
647                         cpumode = PERF_RECORD_MISC_USER;
648                 }
649
650                 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, start_addr,&al);
651                 if (!al.map || !al.map->dso) {
652                         goto endTest;
653                 }
654                 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
655                     dso__data_status_seen(al.map->dso,DSO_DATA_STATUS_SEEN_ITRACE)) {
656                         goto endTest;
657                 }
658
659                 offset = al.map->map_ip(al.map,start_addr);
660
661
662                 e = cs_etm__cache_lookup(al.map->dso, machine, offset);
663
664                 if (e) {
665                   (void) e;
666                 } else {
667                         int len;
668                         map__load(al.map, machine->symbol_filter);
669
670                         bufsz = sizeof(buf);
671                         len = dso__data_read_offset(al.map->dso, machine,
672                                                     offset, buf, bufsz);
673
674                         if (len <= 0) {
675                                 goto endTest;
676                         }
677
678                         cs_etm__cache_add(al.map->dso, machine, offset, (end_addr - start_addr) >> 2, end_addr - start_addr);
679
680                 }
681 endTest:
682                 (void) offset;
683         }
684 #endif
685
686         ret = perf_session__deliver_synth_event(etm->session,event, &sample);
687
688         if (ret) {
689                 pr_err("CS ETM Trace: failed to deliver instruction event, error %d\n", ret);
690
691         }
692         return ret;
693 }
694
695 struct cs_etm_synth {
696         struct perf_tool dummy_tool;
697         struct perf_session *session;
698 };
699
700
701 static int cs_etm__event_synth(struct perf_tool *tool,
702                               union perf_event *event,
703                               struct perf_sample *sample,
704                               struct machine *machine)
705 {
706         struct cs_etm_synth *cs_etm_synth =
707                       container_of(tool, struct cs_etm_synth, dummy_tool);
708
709         (void) sample;
710         (void) machine;
711
712         return perf_session__deliver_synth_event(cs_etm_synth->session, event, NULL);
713
714 }
715
716
717 static int cs_etm__synth_event(struct perf_session *session,
718                               struct perf_event_attr *attr, u64 id)
719 {
720         struct cs_etm_synth cs_etm_synth;
721
722         memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
723         cs_etm_synth.session = session;
724
725         return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
726                                            &id, cs_etm__event_synth);
727 }
728
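/*
 * Register the synthesized 'instructions' event with the session.  The
 * attribute is cloned from the first evsel that matches the CoreSight PMU
 * type so that later synthesized samples are parsed consistently.
 */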
729 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm, 
730                                struct perf_session *session)
731 {
732         struct perf_evlist *evlist = session->evlist;
733         struct perf_evsel *evsel;
734         struct perf_event_attr attr;
735         bool found = false;
736         u64 id;
737         int err;
738
739         evlist__for_each(evlist, evsel) {
740
741                 if (evsel->attr.type == etm->pmu_type) {
742                         found = true;
743                         break;
744                 }
745         }
746
747         if (!found) {
748                 pr_debug("There are no selected events with Core Sight Trace data\n");
749                 return 0;
750         }
751
752         memset(&attr, 0, sizeof(struct perf_event_attr));
753         attr.size = sizeof(struct perf_event_attr);
754         attr.type = PERF_TYPE_HARDWARE;
755         attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
756         attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
757                             PERF_SAMPLE_PERIOD;
758         if (etm->timeless_decoding) 
759                 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
760         else
761                 attr.sample_type |= PERF_SAMPLE_TIME;
762
763         attr.exclude_user = evsel->attr.exclude_user;
764         attr.exclude_kernel = evsel->attr.exclude_kernel;
765         attr.exclude_hv = evsel->attr.exclude_hv;
766         attr.exclude_host = evsel->attr.exclude_host;
767         attr.exclude_guest = evsel->attr.exclude_guest;
768         attr.sample_id_all = evsel->attr.sample_id_all;
769         attr.read_format = evsel->attr.read_format;
770
771         id = evsel->id[0] + 1000000000;
772
773         if (!id)
774                 id = 1;
775
776         if (etm->synth_opts.instructions) {
777                 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
778                 attr.sample_period = etm->synth_opts.period;
779                 etm->instructions_sample_period = attr.sample_period;
780                 err = cs_etm__synth_event(session, &attr, id);
781
782                 if (err) {
783                         pr_err("%s: failed to synthesize 'instructions' event type\n",
784                                __func__);
785                         return err;
786                 }
787                 etm->sample_instructions = true;
788                 etm->instructions_sample_type = attr.sample_type;
789                 etm->instructions_id = id;
790                 id += 1;
791         }
792
793         etm->synth_needs_swap = evsel->needs_swap;
794         return 0;
795 }
796
797 static int cs_etm__sample(struct cs_etm_queue *etmq)
798 {
799         //const struct cs_etm_state *state = etmq->state;
800         struct cs_etm_packet packet;
801         //struct cs_etm_auxtrace *etm = etmq->etm;
802         int err;
803
804         if (!etmq->have_sample)
805                 return 0;
806
807         etmq->have_sample = false;
808
809         err = cs_etm_decoder__get_packet(etmq->decoder, &packet);
810         /* if there is no packet, err is -1: this is not a real error */
811
812         if (!err && packet.sample_type & CS_ETM_RANGE) {
813                 err = cs_etm__synth_instruction_sample(etmq,&packet);
814                 if (err)
815                         return err;
816         }
817         return 0;
818 }
819
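/*
 * Fetch one trace buffer for the queue and run it through the decoder
 * block by block, synthesizing an instruction sample for every completed
 * range packet.
 */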
820 static int cs_etm__run_decoder(struct cs_etm_queue *etmq, u64 *timestamp)
821 {
822         struct cs_etm_buffer buffer = {.buf = 0,};
823         size_t buffer_used = 0;
824         int err = 0;
825
826         err = cs_etm__get_trace(&buffer,etmq);
827         if (err)
828                 return err;
829
830         do {
831             size_t processed = 0;
832             etmq->state = cs_etm_decoder__process_data_block(etmq->decoder,
833                                                etmq->offset,
834                                                &buffer.buf[buffer_used],
835                                                buffer.len-buffer_used,
836                                                &processed);
837             err = etmq->state->err;
838             etmq->offset += processed;
839             buffer_used += processed;
840             if (!err) {
841                 etmq->have_sample = true;
842                 cs_etm__sample(etmq);
843             }
844         } while (!etmq->eot && (buffer.len > buffer_used));
845
846         (void) timestamp;
847
848         return err;
849 }
850
851 static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
852 {
853         if (etm->queues.new_data) {
854                 etm->queues.new_data = false;
855                 return cs_etm__setup_queues(etm);
856         }
857         return 0;
858 }
859
860 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm, u64 timestamp)
861 {
862         unsigned int queue_nr;
863         u64 ts;
864         int ret;
865
866         while (1) {
867                 struct auxtrace_queue *queue;
868                 struct cs_etm_queue *etmq;
869         
870                 if (!etm->heap.heap_cnt)
871                         return 0;
872         
873                 if (etm->heap.heap_array[0].ordinal >= timestamp)
874                         return 0;
875         
876                 queue_nr = etm->heap.heap_array[0].queue_nr;
877                 queue = &etm->queues.queue_array[queue_nr];
878                 etmq = queue->priv;
879         
880                 //cs_etm__log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
881                            //queue_nr, etm->heap.heap_array[0].ordinal,
882                            //timestamp);
883
884                 auxtrace_heap__pop(&etm->heap);
885
886                 if (etm->heap.heap_cnt) {
887                         ts = etm->heap.heap_array[0].ordinal + 1;
888                         if (ts > timestamp)
889                                 ts = timestamp;
890                 } else {
891                         ts = timestamp;
892                 }
893
894                 cs_etm__set_pid_tid_cpu(etm, queue);
895
896                 ret = cs_etm__run_decoder(etmq, &ts);
897
898                 if (ret < 0) {
899                         auxtrace_heap__add(&etm->heap, queue_nr, ts);
900                         return ret;
901                 }
902
903                 if (!ret) {
904                         ret = auxtrace_heap__add(&etm->heap, queue_nr, ts);
905                         if (ret < 0)
906                                 return ret;
907                 } else {
908                         etmq->on_heap = false;
909                 }
910         }
911         return 0;
912 }
913
914 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
915                                           pid_t tid,
916                                           u64 time_)
917 {
918         struct auxtrace_queues *queues = &etm->queues;
919         unsigned int i;
920         u64 ts = 0;
921         
922         for (i = 0; i < queues->nr_queues; ++i) {
923                 struct auxtrace_queue *queue = &(etm->queues.queue_array[i]);
924                 struct cs_etm_queue *etmq = queue->priv;
925
926                 if (etmq && ((tid == -1) || (etmq->tid == tid))) {
927                         etmq->time = time_;
928                         cs_etm__set_pid_tid_cpu(etm, queue);
929                         cs_etm__run_decoder(etmq,&ts);
930
931                 }
932         }
933         return 0;
934 }
935
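/*
 * Map a CPU number to its decode queue: try the slot whose index equals
 * the CPU first, then search backwards and finally forwards through the
 * remaining queues.
 */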
936 static struct cs_etm_queue *cs_etm__cpu_to_etmq(struct cs_etm_auxtrace *etm, 
937                                                int cpu)
938 {
939         unsigned q,j;
940
941         if (etm->queues.nr_queues == 0)
942                 return NULL;
943
944         if (cpu < 0)
945                 q = 0;
946         else if ((unsigned) cpu >= etm->queues.nr_queues)
947                 q = etm->queues.nr_queues - 1;
948         else 
949                 q = cpu;
950
951         if (etm->queues.queue_array[q].cpu == cpu)
952                 return etm->queues.queue_array[q].priv;
953
954         for (j = 0; q > 0; j++) {
955                 if (etm->queues.queue_array[--q].cpu == cpu)
956                         return etm->queues.queue_array[q].priv;
957         }
958
959         for (; j < etm->queues.nr_queues; j++) {
960                 if (etm->queues.queue_array[j].cpu == cpu)
961                         return etm->queues.queue_array[j].priv;
962
963         }
964
965         return NULL;
966 }
967
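/*
 * Decoder callback used to read traced memory.  The address is resolved
 * through the thread's maps and the bytes are read from the backing DSO.
 * Addresses above 0xffffffc000000000 are treated as kernel space, which
 * assumes an arm64 kernel virtual address layout.
 */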
968 static uint32_t cs_etm__mem_access(struct cs_etm_queue *etmq, uint64_t address, size_t size, uint8_t *buffer)
969 {
970         struct   addr_location al;
971         uint64_t offset;
972         struct   thread *thread;
973         struct   machine *machine;
974         uint8_t  cpumode;
975         int len;
976
977         if (etmq == NULL)
978                 return 0;
979
980         machine = etmq->etm->machine;
981         thread = etmq->thread;
982         if (address > 0xffffffc000000000UL) {
983                 cpumode = PERF_RECORD_MISC_KERNEL;
984         } else {
985                 cpumode = PERF_RECORD_MISC_USER;
986         }
987
988         thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address,&al);
989
990         if (!al.map || !al.map->dso) {
991                 return 0;
992         }
993
994         if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
995             dso__data_status_seen(al.map->dso,DSO_DATA_STATUS_SEEN_ITRACE)) {
996                 return 0;
997         }
998
999         offset = al.map->map_ip(al.map,address);
1000
1001         map__load(al.map, machine->symbol_filter);
1002
1003         len = dso__data_read_offset(al.map->dso, machine,
1004                                     offset, buffer, size);
1005
1006         if (len <= 0) {
1007                 return 0;
1008         }
1009
1010         return len;
1011 }
1012
1013 static bool check_need_swap(int file_endian)
1014 {
1015         const int data = 1;
1016         u8 *check = (u8 *)&data;
1017         int host_endian;
1018
1019         if (check[0] == 1)
1020                 host_endian = ELFDATA2LSB;
1021         else
1022                 host_endian = ELFDATA2MSB;
1023
1024         return host_endian != file_endian;
1025 }
1026
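/*
 * Minimal ELF reader: scan the program headers of 'fname', byte-swapping
 * if the file endianness differs from the host, and return the offset,
 * virtual address and file size of the first PT_LOAD segment.  Used below
 * to hand the vmlinux image to the decoder.
 */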
1027 static int cs_etm__read_elf_info(const char *fname, uint64_t *foffset, uint64_t *fstart, uint64_t *fsize)
1028 {
1029         FILE *fp;
1030         u8 e_ident[EI_NIDENT];
1031         int ret = -1;
1032         bool need_swap = false;
1033         size_t buf_size;
1034         void *buf;
1035         int i;
1036
1037         fp = fopen(fname, "r");
1038         if (fp == NULL)
1039                 return -1;
1040
1041         if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
1042                 goto out;
1043
1044         if (memcmp(e_ident, ELFMAG, SELFMAG) ||
1045             e_ident[EI_VERSION] != EV_CURRENT)
1046                 goto out;
1047
1048         need_swap = check_need_swap(e_ident[EI_DATA]);
1049
1050         /* for simplicity */
1051         fseek(fp, 0, SEEK_SET);
1052
1053         if (e_ident[EI_CLASS] == ELFCLASS32) {
1054                 Elf32_Ehdr ehdr;
1055                 Elf32_Phdr *phdr;
1056
1057                 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
1058                         goto out;
1059
1060                 if (need_swap) {
1061                         ehdr.e_phoff = bswap_32(ehdr.e_phoff);
1062                         ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
1063                         ehdr.e_phnum = bswap_16(ehdr.e_phnum);
1064                 }
1065
1066                 buf_size = ehdr.e_phentsize * ehdr.e_phnum;
1067                 buf = malloc(buf_size);
1068                 if (buf == NULL)
1069                         goto out;
1070
1071                 fseek(fp, ehdr.e_phoff, SEEK_SET);
1072                 if (fread(buf, buf_size, 1, fp) != 1)
1073                         goto out_free;
1074
1075                 for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
1076
1077                         if (need_swap) {
1078                                 phdr->p_type = bswap_32(phdr->p_type);
1079                                 phdr->p_offset = bswap_32(phdr->p_offset);
                                     phdr->p_vaddr = bswap_32(phdr->p_vaddr);
1080                                 phdr->p_filesz = bswap_32(phdr->p_filesz);
1081                         }
1082
1083                         if (phdr->p_type != PT_LOAD)
1084                                 continue;
1085
1086                         *foffset = phdr->p_offset;
1087                         *fstart = phdr->p_vaddr;
1088                         *fsize = phdr->p_filesz;
1089                         ret = 0;
1090                         break;
1091                 }
1092         } else {
1093                 Elf64_Ehdr ehdr;
1094                 Elf64_Phdr *phdr;
1095
1096                 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
1097                         goto out;
1098
1099                 if (need_swap) {
1100                         ehdr.e_phoff = bswap_64(ehdr.e_phoff);
1101                         ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
1102                         ehdr.e_phnum = bswap_16(ehdr.e_phnum);
1103                 }
1104
1105                 buf_size = ehdr.e_phentsize * ehdr.e_phnum;
1106                 buf = malloc(buf_size);
1107                 if (buf == NULL)
1108                         goto out;
1109
1110                 fseek(fp, ehdr.e_phoff, SEEK_SET);
1111                 if (fread(buf, buf_size, 1, fp) != 1)
1112                         goto out_free;
1113
1114                 for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
1115
1116                         if (need_swap) {
1117                                 phdr->p_type = bswap_32(phdr->p_type);
1118                                 phdr->p_offset = bswap_64(phdr->p_offset);
                                     phdr->p_vaddr = bswap_64(phdr->p_vaddr);
1119                                 phdr->p_filesz = bswap_64(phdr->p_filesz);
1120                         }
1121
1122                         if (phdr->p_type != PT_LOAD)
1123                                 continue;
1124
1125                         *foffset = phdr->p_offset;
1126                         *fstart = phdr->p_vaddr;
1127                         *fsize = phdr->p_filesz;
1128                         ret = 0;
1129                         break;
1130                 }
1131         }
1132 out_free:
1133         free(buf);
1134 out:
1135         fclose(fp);
1136         return ret;
1137 }
1138
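/*
 * Per-event hook.  Besides driving the decode queues, MMAP2 records are
 * used to register memory-access callbacks with the decoder and, once per
 * queue, to map the vmlinux image so kernel addresses can be decoded.
 */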
1139 static int cs_etm__process_event(struct perf_session *session,
1140                                 union perf_event *event,
1141                                 struct perf_sample *sample,
1142                                 struct perf_tool *tool)
1143 {
1144         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1145                                                    struct cs_etm_auxtrace,
1146                                                    auxtrace);
1147
1148         u64 timestamp;
1149         int err = 0;
1150
1151         if (dump_trace) 
1152                 return 0;
1153
1154         if (!tool->ordered_events) {
1155                 pr_err("CoreSight ETM Trace requires ordered events\n");
1156                 return -EINVAL;
1157         }
1158
1159         if (sample->time && (sample->time != (u64)-1))
1160                 timestamp = sample->time;
1161         else
1162                 timestamp = 0;
1163
1164         if (timestamp || etm->timeless_decoding) {
1165                 err = cs_etm__update_queues(etm);
1166                 if (err)
1167                         return err;
1168
1169         }
1170
1171         if (event->header.type == PERF_RECORD_MMAP2) {
1172                 struct dso *dso;
1173                 int cpu;
1174                 struct cs_etm_queue *etmq;
1175
1176                 cpu = sample->cpu;
1177
1178                 etmq = cs_etm__cpu_to_etmq(etm,cpu);
1179
1180                 if (!etmq) {
1181                         return -1;
1182                 }
1183
1184                 dso = dsos__find(&(etm->machine->dsos),event->mmap2.filename,false);
1185                 if (NULL != dso) {
1186                         err = cs_etm_decoder__add_mem_access_cb(
1187                             etmq->decoder,
1188                             event->mmap2.start, 
1189                             event->mmap2.len, 
1190                             cs_etm__mem_access);
1191                 }
1192
1193                 if ((symbol_conf.vmlinux_name != NULL) && (!etmq->kernel_mapped)) {
1194                         uint64_t foffset;
1195                         uint64_t fstart;
1196                         uint64_t fsize;
1197
1198                         err = cs_etm__read_elf_info(symbol_conf.vmlinux_name,
1199                                                       &foffset,&fstart,&fsize);
1200
1201                         if (!err) {
1202                                 cs_etm_decoder__add_bin_file(
1203                                         etmq->decoder,
1204                                         foffset,
1205                                         fstart,
1206                                         fsize & ~0x1ULL,
1207                                         symbol_conf.vmlinux_name);
1208
1209                                 etmq->kernel_mapped = true;
1210                         }
1211                 }
1212
1213         }
1214
1215         if (etm->timeless_decoding) {
1216                 if (event->header.type == PERF_RECORD_EXIT) {
1217                         err = cs_etm__process_timeless_queues(etm,
1218                                                              event->fork.tid,
1219                                                              sample->time);
1220                 }
1221         } else if (timestamp) {
1222                 err = cs_etm__process_queues(etm, timestamp);
1223         }
1224
1225         //cs_etm__log("event %s (%u): cpu %d time%"PRIu64" tsc %#"PRIx64"\n",
1226                    //perf_event__name(event->header.type), event->header.type,
1227                    //sample->cpu, sample->time, timestamp);
1228         return err;
1229 }
1230
1231 static int cs_etm__process_auxtrace_event(struct perf_session *session,
1232                                   union perf_event *event,
1233                                   struct perf_tool *tool)
1234 {
1235         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1236                                                    struct cs_etm_auxtrace,
1237                                                    auxtrace);
1238
1239         (void) tool;
1240
1241         if (!etm->data_queued) {
1242                 struct auxtrace_buffer *buffer;
1243                 off_t  data_offset;
1244                 int fd = perf_data_file__fd(session->file);
1245                 bool is_pipe = perf_data_file__is_pipe(session->file);
1246                 int err;
1247
1248                 if (is_pipe) {
1249                         data_offset = 0;
1250                 } else {
1251                         data_offset = lseek(fd, 0, SEEK_CUR);
1252                         if (data_offset == -1) {
1253                                 return -errno;
1254                         }
1255                 }
1256
1257                 err = auxtrace_queues__add_event(&etm->queues,
1258                                                  session,
1259                                                  event,
1260                                                  data_offset,
1261                                                  &buffer);
1262                 if (err)
1263                         return err;
1264
1265                 if (dump_trace) {
1267                         if (auxtrace_buffer__get_data(buffer,fd)) {
1268                                 cs_etm__dump_event(etm,buffer);
1269                                 auxtrace_buffer__put_data(buffer);
1270                         }
1271                 }
1272         } 
1273
1274         return 0;
1275
1276 }
1277
1278 static const char * const cs_etm_global_header_fmts[] = {
1279   [CS_HEADER_VERSION_0]    = "   Header version                 %"PRIx64"\n",
1280   [CS_PMU_TYPE_CPUS]       = "   PMU type/num cpus              %"PRIx64"\n",
1281   [CS_ETM_SNAPSHOT]        = "   Snapshot                       %"PRIx64"\n",
1282 };
1283
1284 static const char * const cs_etm_priv_fmts[] = {
1285   [CS_ETM_MAGIC]           = "   Magic number                   %"PRIx64"\n",
1286   [CS_ETM_CPU]             = "   CPU                            %"PRIx64"\n",
1287   [CS_ETM_ETMCR]           = "   ETMCR                          %"PRIx64"\n",
1288   [CS_ETM_ETMTRACEIDR]     = "   ETMTRACEIDR                    %"PRIx64"\n",
1289   [CS_ETM_ETMCCER]         = "   ETMCCER                        %"PRIx64"\n",
1290   [CS_ETM_ETMIDR]          = "   ETMIDR                         %"PRIx64"\n",
1291 };
1292
1293 static const char * const cs_etmv4_priv_fmts[] = {
1294   [CS_ETM_MAGIC]           = "   Magic number                   %"PRIx64"\n",
1295   [CS_ETM_CPU]             = "   CPU                            %"PRIx64"\n",
1296   [CS_ETMV4_TRCCONFIGR]    = "   TRCCONFIGR                     %"PRIx64"\n",
1297   [CS_ETMV4_TRCTRACEIDR]   = "   TRCTRACEIDR                    %"PRIx64"\n",
1298   [CS_ETMV4_TRCIDR0]       = "   TRCIDR0                        %"PRIx64"\n",
1299   [CS_ETMV4_TRCIDR1]       = "   TRCIDR1                        %"PRIx64"\n",
1300   [CS_ETMV4_TRCIDR2]       = "   TRCIDR2                        %"PRIx64"\n",
1301   [CS_ETMV4_TRCIDR8]       = "   TRCIDR8                        %"PRIx64"\n",
1302   [CS_ETMV4_TRCAUTHSTATUS] = "   TRCAUTHSTATUS                  %"PRIx64"\n",
1303 };
1304
1305 static void cs_etm__print_auxtrace_info(u64 *val, size_t num)
1306 {
1307         unsigned i,j,cpu;
1308
1309         for (i = 0, cpu = 0; cpu < num; ++cpu) {
1310
1311                 if (val[i] == __perf_cs_etmv3_magic) {
1312                         for (j = 0; j < CS_ETM_PRIV_MAX; ++j, ++i) {
1313                                 fprintf(stdout,cs_etm_priv_fmts[j],val[i]);
1314                         }
1315                 } else if (val[i] == __perf_cs_etmv4_magic) {
1316                         for (j = 0; j < CS_ETMV4_PRIV_MAX; ++j, ++i) {
1317                                 fprintf(stdout,cs_etmv4_priv_fmts[j],val[i]);
1318                         }
1319                 } else {
1320                         /* unknown magic number: stop dumping */
1321                         return;
1322                 }
1323         }
1324 }
1325
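/*
 * Entry point for PERF_RECORD_AUXTRACE_INFO: parse the global header and
 * the per-CPU ETMv3/ETMv4 metadata, allocate the cs_etm_auxtrace instance
 * and register the auxtrace callbacks with the session.
 */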
1326 int cs_etm__process_auxtrace_info(union perf_event *event,
1327                                  struct perf_session *session)
1328 {
1329         struct auxtrace_info_event *auxtrace_info = &(event->auxtrace_info);
1330         size_t event_header_size = sizeof(struct perf_event_header);
1331         size_t info_header_size = 8;
1332         size_t total_size = auxtrace_info->header.size;
1333         size_t priv_size = 0;
1334         size_t num_cpu;
1335         struct cs_etm_auxtrace *etm = 0;
1336         int err = 0;
1337         u64 *ptr;
1338         u64 *hdr = NULL;
1339         u64 **metadata = NULL;
1340         size_t i,j,k;
1341         unsigned pmu_type;
1342
1343         if (total_size < (event_header_size + info_header_size))
1344                 return -EINVAL;
1345
1346         priv_size = total_size - event_header_size - info_header_size;
1347
1348         // First the global part
1349
1350         ptr = (u64 *) auxtrace_info->priv;
1351         if (ptr[0] == 0) {
1352                 hdr = zalloc(sizeof(u64) * CS_HEADER_VERSION_0_MAX);
1353                 if (hdr == NULL) {
1354                         return -ENOMEM;
1355                 }
1356                 for (i = 0; i < CS_HEADER_VERSION_0_MAX; ++i) {
1357                         hdr[i] = ptr[i];
1358                 }
1359                 num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
1360                 pmu_type = (unsigned) ((hdr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
1361         } else {
1362                 return -EINVAL;
1363         }
1364
1365         metadata = zalloc(sizeof(u64 *) * num_cpu);
1366
1367         if (metadata == NULL) {
1368                 return -EINVAL;
1369         }
1370
1371         for (j = 0; j < num_cpu; ++j) {
1372                 if (ptr[i] == __perf_cs_etmv3_magic) {
1373                         metadata[j] = zalloc(sizeof(u64) * CS_ETM_PRIV_MAX);
1374                         if (metadata[j] == NULL)
1375                                 return -ENOMEM;
1376                         for (k = 0; k < CS_ETM_PRIV_MAX; k++) {
1377                                 metadata[j][k] = ptr[i+k];
1378                         }
1379                         i += CS_ETM_PRIV_MAX;
1380                 } else if (ptr[i] == __perf_cs_etmv4_magic) {
1381                         metadata[j] = zalloc(sizeof(u64) * CS_ETMV4_PRIV_MAX);
1382                         if (metadata[j] == NULL)
1383                                 return -ENOMEM;
1384                         for (k = 0; k < CS_ETMV4_PRIV_MAX; k++) {
1385                                 metadata[j][k] = ptr[i+k];
1386                         }
1387                         i += CS_ETMV4_PRIV_MAX;
1388                 }
1389         }
1390
1391         if (i*8 != priv_size)
1392                 return -EINVAL;
1393
1394         if (dump_trace)
1395                 cs_etm__print_auxtrace_info(auxtrace_info->priv,num_cpu);
1396
1397         etm = zalloc(sizeof(struct cs_etm_auxtrace));
1398         if (!etm)
1399                 return -ENOMEM;
1400
1401         etm->num_cpu = num_cpu;
1402         etm->pmu_type = pmu_type;
1403         etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
1405
1406
1407         err = auxtrace_queues__init(&etm->queues);
1408         if (err)
1409                 goto err_free;
1410
1411         etm->unknown_thread = thread__new(999999999,999999999);
1412         if (etm->unknown_thread == NULL) {
1413                 err = -ENOMEM;
1414                 goto err_free_queues;
1415         }
1416         err = thread__set_comm(etm->unknown_thread, "unknown", 0);
1417         if (err) {
1418                 goto err_delete_thread;
1419         }
1420
             /* etm->machine must be valid before the thread's map groups are set up */
             etm->machine = &session->machines.host;
1421         if (thread__init_map_groups(etm->unknown_thread,
1422                                     etm->machine)) {
1423                 err = -ENOMEM;
1424                 goto err_delete_thread;
1425         }
1426
1427         etm->timeless_decoding = true;
1428         etm->sampling_mode = false;
1429         etm->metadata = metadata;
1430         etm->session = session;
1432         etm->auxtrace_type = auxtrace_info->type;
1433
1434         etm->auxtrace.process_event = cs_etm__process_event;
1435         etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
1436         etm->auxtrace.flush_events = cs_etm__flush_events;
1437         etm->auxtrace.free_events  = cs_etm__free_events;
1438         etm->auxtrace.free         = cs_etm__free;
1439         session->auxtrace = &(etm->auxtrace);
1440
1441         if (dump_trace)
1442                 return 0;
1443
1444         if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
1445                 etm->synth_opts = *session->itrace_synth_opts;
1446         } else {
1447                 itrace_synth_opts__set_default(&etm->synth_opts);
1448         }
1449         etm->synth_opts.branches = false;
1450         etm->synth_opts.callchain = false;
1451         etm->synth_opts.calls = false;
1452         etm->synth_opts.returns = false;
1453
1454         err = cs_etm__synth_events(etm, session);
1455         if (err)
1456                 goto err_delete_thread;
1457
1458         err = auxtrace_queues__process_index(&etm->queues, session);
1459         if (err)
1460                 goto err_delete_thread;
1461
1462         etm->data_queued = etm->queues.populated;
1463
1464         return 0;
1465
1466 err_delete_thread:
1467         thread__delete(etm->unknown_thread);
1468 err_free_queues:
1469         auxtrace_queues__free(&etm->queues);
1470         session->auxtrace = NULL;
1471 err_free:
1472         free(etm);
1473         return err;
1474 }