/*
 *  linux/arch/arm/include/asm/pmu.h
 *
 *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
/**
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
36 struct arm_pmu_platdata {
37 irqreturn_t (*handle_irq)(int irq, void *dev,
38 irq_handler_t pmu_handler);
39 int (*runtime_resume)(struct device *dev);
40 int (*runtime_suspend)(struct device *dev);
43 #ifdef CONFIG_HW_PERF_EVENTS
/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS 32

/* Sentinel in the event maps: no hardware counterpart on this PMU. */
#define HW_OP_UNSUPPORTED 0xFFFF
/* Shorthand for the PERF_COUNT_HW_CACHE_* enumerators. */
#define C(_x) PERF_COUNT_HW_CACHE_##_x
/* Sentinel in the cache maps: op/result combination is not counted. */
#define CACHE_OP_UNSUPPORTED 0xFFFF
/* Initializer: mark every generic hardware event as unsupported. */
#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

/*
 * Initializer: mark every cache (type x op x result) combination as
 * unsupported.  The two closing brace continuation lines were missing,
 * leaving the designated-initializer nest unterminated; restored here.
 */
#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
},
64 /* The events for a given PMU register set. */
65 struct pmu_hw_events {
67 * The events that are active on the PMU for the given index.
69 struct perf_event **events;
72 * A 1 bit for an index indicates that the counter is being used for
73 * an event. A 0 means that the counter can be used.
75 unsigned long *used_mask;
78 * Hardware lock to serialize accesses to PMU registers. Needed for the
79 * read/modify/write sequences.
81 raw_spinlock_t pmu_lock;
86 cpumask_t active_irqs;
88 irqreturn_t (*handle_irq)(int irq_num, void *dev);
89 void (*enable)(struct perf_event *event);
90 void (*disable)(struct perf_event *event);
91 int (*get_event_idx)(struct pmu_hw_events *hw_events,
92 struct perf_event *event);
93 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
94 struct perf_event *event);
95 int (*set_event_filter)(struct hw_perf_event *evt,
96 struct perf_event_attr *attr);
97 u32 (*read_counter)(struct perf_event *event);
98 void (*write_counter)(struct perf_event *event, u32 val);
99 void (*start)(struct arm_pmu *);
100 void (*stop)(struct arm_pmu *);
101 void (*reset)(void *);
102 int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
103 void (*free_irq)(struct arm_pmu *);
104 int (*map_event)(struct perf_event *event);
106 atomic_t active_events;
107 struct mutex reserve_mutex;
109 struct platform_device *plat_device;
110 struct pmu_hw_events *(*get_hw_events)(void);
/* Convert a generic struct pmu pointer to its containing struct arm_pmu. */
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Power-management ops shared by the ARM PMU platform drivers. */
extern const struct dev_pm_ops armpmu_dev_pm_ops;

/* Common armpmu entry points, implemented by the ARM perf core. */
int armpmu_register(struct arm_pmu *armpmu, int type);

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);
123 int armpmu_map_event(struct perf_event *event,
124 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
125 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
126 [PERF_COUNT_HW_CACHE_OP_MAX]
127 [PERF_COUNT_HW_CACHE_RESULT_MAX],
130 #endif /* CONFIG_HW_PERF_EVENTS */
132 #endif /* __ARM_PMU_H__ */