/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>
/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};
/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_event_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};
/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
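
/*
 * Illustrative sketch (not part of the ABI declarations): for
 * PERF_TYPE_HW_CACHE events, attr.config is composed from the three enums
 * above as documented in perf_event_open(2):
 *
 *	config = (perf_hw_cache_id) |
 *		 (perf_hw_cache_op_id << 8) |
 *		 (perf_hw_cache_op_result_id << 16);
 *
 * For example, counting L1 data-cache read misses:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HW_CACHE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CACHE_L1D |
 *			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
 *	};
 */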
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,
	PERF_COUNT_SW_BPF_OUTPUT		= 10,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,
	PERF_SAMPLE_REGS_INTR			= 1U << 18,

	PERF_SAMPLE_MAX = 1U << 19,		/* non-ABI */
};
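
/*
 * Illustrative sketch (not part of the ABI declarations): sample_type bits
 * are OR-ed together; each selected bit adds the corresponding field to
 * PERF_RECORD_SAMPLE records, in the order documented for
 * PERF_RECORD_SAMPLE below. For example:
 *
 *	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *			   PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 */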
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6, /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7, /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8, /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)
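
/*
 * Illustrative sketch (not part of the ABI declarations): request a branch
 * stack of any user-level branches alongside each sample. If none of the
 * PERF_SAMPLE_BRANCH_PLM_ALL bits are set, the kernel falls back to the
 * event's own privilege level, as described above.
 *
 *	attr.sample_type	|= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type	 = PERF_SAMPLE_BRANCH_USER |
 *				   PERF_SAMPLE_BRANCH_ANY;
 */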
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION        = (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
	PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX	        = (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT = 32,
};
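
/*
 * Illustrative sketch (not part of the ABI declarations): when an abort
 * code is reported, it occupies the reserved upper 32 bits of the
 * PERF_SAMPLE_TRANSACTION value and can be extracted with:
 *
 *	abort_code = (transaction & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */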
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
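
/*
 * Illustrative sketch (not part of the ABI declarations): reading and
 * scaling a single, non-group counter opened with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING. When
 * the counter was multiplexed, time_running < time_enabled and the raw
 * value can be scaled up accordingly (error handling omitted):
 *
 *	struct {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *	} rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */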
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */
/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */
				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */
				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2          :  1, /* include mmap with inode data */
				comm_exec      :  1, /* flag comm events that are due to an exec */
				use_clockid    :  1, /* use @clockid for time fields */
				context_switch :  1, /* context switch data */
				__reserved_1   : 37;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	__s32	clockid;

	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32	aux_watermark;
	__u32	__reserved_2;	/* align to __u64 */
};
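
/*
 * Illustrative sketch (user-space, not part of this header): opening a
 * counter with the perf_event_open() syscall, which glibc does not wrap.
 * This example counts instructions retired in the calling thread; error
 * handling is omitted for brevity.
 *
 *	#include <linux/perf_event.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type	= PERF_TYPE_HARDWARE;
 *	attr.size	= sizeof(attr);
 *	attr.config	= PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled	= 1;
 *	attr.exclude_kernel = 1;
 *	attr.exclude_hv	= 1;
 *
 *	// pid = 0, cpu = -1: this thread, any CPU; no group fd, no flags.
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... code to measure ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */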
#define perf_flags(attr)	(*(&(attr)->read_format + 1))
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
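
/*
 * Illustrative sketch (not part of the ABI declarations): the ioctl
 * argument can carry perf_event_ioc_flags; passing PERF_IOC_FLAG_GROUP
 * applies the operation to every member of the event group led by fd:
 *
 *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */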
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *   u64 delta = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier();
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_user_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_* fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_____res		: 59;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *
	 *   count += pmc;
	 */
	__u16	pmc_width;
	/*
	 * If cap_user_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *              ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be calculated
	 * from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot = cyc >> time_shift;
	 *   rem  = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;

	__u32	size;			/* Header size up to __reserved[] fields. */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8	__reserved[118*8+4];	/* align to 1k. */
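
	/*
	 * Illustrative sketch (user-space, not part of the ABI declarations):
	 * a hypothetical helper converting a raw cycle counter value to a
	 * PERF_SAMPLE_TIME style timestamp, exactly as in the formulas above:
	 *
	 *	static __u64 cyc_to_timestamp(const struct perf_event_mmap_page *pc,
	 *				      __u64 cyc)
	 *	{
	 *		__u64 quot = cyc >> pc->time_shift;
	 *		__u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);
	 *
	 *		return pc->time_zero + quot * pc->time_mult +
	 *		       ((rem * pc->time_mult) >> pc->time_shift);
	 *	}
	 *
	 * The time_* fields must be read under the pc->lock seqcount loop
	 * shown earlier, and only when cap_user_time_zero is set.
	 */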
	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
	__u64	data_offset;		/* where the buffer starts */
	__u64	data_size;		/* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by the userspace, so that
	 *
	 *   aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and the same ordering rules apply.
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};
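
/*
 * Illustrative sketch (user-space, not part of the ABI declarations):
 * draining records from the mmap()ed data buffer while honouring the
 * data_head/data_tail ordering rules described above. Here `base' is
 * assumed to point at page + data_offset and `size' is data_size (a power
 * of two); the acquire/release atomics stand in for smp_rmb()/smp_mb().
 *
 *	__u64 head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
 *	__u64 tail = pc->data_tail;
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr =
 *			(void *)(base + (tail & (size - 1)));
 *
 *		// ... consume hdr->size bytes of record data ...
 *		tail += hdr->size;
 *	}
 *
 *	// The release store orders the data reads before the tail update.
 *	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 *
 * A real consumer also has to handle records that wrap around the end of
 * the buffer.
 */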
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events, so they can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are stashed
	 * just after the perf_event_header and the fields already present for
	 * the existing record types, i.e. at the end of the payload. That way
	 * a newer perf.data file will be supported by older perf tools, with
	 * these new optional fields being ignored.
	 *
	 * struct sample_id {
	 * 	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 * 	{ u64			time;     } && PERF_SAMPLE_TIME
	 * 	{ u64			id;       } && PERF_SAMPLE_ID
	 * 	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 * 	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */
	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *	char				comm[];
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,
	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *
	 *	{ u32			size;
	 *	  char			data[size];} && PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 *
	 * 	{ u64			abi; # enum perf_sample_regs_abi
	 * 	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 * 	{ u64			size;
	 * 	  char			data[size];
	 * 	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,
	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	u32				maj;
	 *	u32				min;
	 *	u64				ino;
	 *	u64				ino_generation;
	 *	u32				prot, flags;
	 *	char				filename[];
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 * 	struct perf_event_header	header;
	 * 	u64				aux_offset;
	 * 	u64				aux_size;
	 *	u64				flags;
	 * 	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that instruction trace has started.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				next_prev_pid;
	 *	u32				next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	PERF_RECORD_MAX,			/* non-ABI */
};
#define PERF_MAX_STACK_DEPTH		127

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

/*
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_rsvd:31;
	};
};
/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01   /* not available */
#define PERF_MEM_LVL_HIT	0x02   /* hit level */
#define PERF_MEM_LVL_MISS	0x04   /* miss level */
#define PERF_MEM_LVL_L1		0x08   /* L1 */
#define PERF_MEM_LVL_LFB	0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20   /* L2 */
#define PERF_MEM_LVL_L3		0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
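
/*
 * Illustrative sketch (not part of the ABI declarations): composing a
 * PERF_SAMPLE_DATA_SRC style value for a load that hit in L1 with no
 * snoop involvement, using the PERF_MEM_S() helper above:
 *
 *	union perf_mem_data_src dsrc = {
 *		.val = PERF_MEM_S(OP, LOAD)    |
 *		       PERF_MEM_S(LVL, HIT)    |
 *		       PERF_MEM_S(LVL, L1)     |
 *		       PERF_MEM_S(SNOOP, NONE) |
 *		       PERF_MEM_S(LOCK, NA)    |
 *		       PERF_MEM_S(TLB, NA),
 *	};
 */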
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		cycles:16,  /* cycle count to last branch */
		reserved:44;
};
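
/*
 * Illustrative sketch (not part of the ABI declarations): walking the
 * branch stack attached to a PERF_RECORD_SAMPLE when
 * PERF_SAMPLE_BRANCH_STACK was requested. Here `p' is assumed to point at
 * the u64 branch-entry count within the parsed sample payload:
 *
 *	__u64 nr = *(__u64 *)p;
 *	struct perf_branch_entry *br = (void *)(p + sizeof(__u64));
 *	__u64 i;
 *
 *	for (i = 0; i < nr; i++)
 *		printf("%#llx -> %#llx %s\n",
 *		       (unsigned long long)br[i].from,
 *		       (unsigned long long)br[i].to,
 *		       br[i].mispred ? "(mispredicted)" : "");
 */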
#endif /* _UAPI_LINUX_PERF_EVENT_H */