perf/x86/intel: Allocate space for storing LBR stack
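
When LBR call-stack mode is enabled, the LBR stack has to be saved when a
task schedules out and restored when it schedules back in. Reserve per-task
space for that snapshot, define a branch-sample bit for call-stack mode one
past the current ABI maximum, and add a helper that reports whether the
active PMU's LBR select map actually supports it.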
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 949d0083a29e63df2f53cdd229406e92832bb557..69c26b396cf43fa5ec47f82b4867a0f8893e8955 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -516,8 +516,19 @@ struct x86_pmu {
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
 };
 
+struct x86_perf_task_context {
+       u64 lbr_from[MAX_LBR_ENTRIES];
+       u64 lbr_to[MAX_LBR_ENTRIES];
+       int lbr_callstack_users;
+       int lbr_stack_state;
+};
+
 enum {
-       PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE = PERF_SAMPLE_BRANCH_MAX_SHIFT,
+       PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = PERF_SAMPLE_BRANCH_MAX_SHIFT,
+       PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE,
+
+       PERF_SAMPLE_BRANCH_CALL_STACK =
+                               1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
 };
 
 #define x86_add_quirk(func_)                                           \
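
struct x86_perf_task_context above only reserves the storage; the code that
fills it at context switch is not part of this patch. For orientation, a
minimal sketch of a save path, assuming x86_pmu.lbr_nr/lbr_from/lbr_to hold
the LBR entry count and base MSR addresses (as the Intel LBR code uses them
elsewhere), intel_pmu_lbr_tos() is the existing top-of-stack MSR reader, and
inventing LBR_NONE/LBR_VALID values for lbr_stack_state:

	/* Hypothetical lbr_stack_state values; not defined by this patch. */
	enum { LBR_NONE, LBR_VALID };

	static void sketch_lbr_save(struct x86_perf_task_context *task_ctx)
	{
		unsigned lbr_idx, mask = x86_pmu.lbr_nr - 1;
		u64 tos = intel_pmu_lbr_tos();	/* read the LBR top-of-stack MSR */
		int i;

		/* Walk the ring from the most recent entry down, copying each pair. */
		for (i = 0; i < x86_pmu.lbr_nr; i++) {
			lbr_idx = (tos - i) & mask;
			rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
			rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		}
		task_ctx->lbr_stack_state = LBR_VALID;
	}

A restore path would mirror this with wrmsrl() and set lbr_stack_state back
to LBR_NONE, so a stale snapshot is never replayed twice.
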
@@ -551,6 +562,12 @@ static struct perf_pmu_events_attr event_attr_##v = {                      \
 
 extern struct x86_pmu x86_pmu __read_mostly;
 
+static inline bool x86_pmu_has_lbr_callstack(void)
+{
+       return  x86_pmu.lbr_sel_map &&
+               x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
+}
+
 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 int x86_perf_event_set_period(struct perf_event *event);
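
x86_pmu_has_lbr_callstack() gives callers a single test for call-stack
capability. An illustrative consumer (the exact hook point is an assumption;
intel_pmu_setup_lbr_filter(), declared below, is a natural place for such a
check) would reject a call-stack request on hardware whose select map has no
entry for the new bit:

	/* Sketch: refuse PERF_SAMPLE_BRANCH_CALL_STACK without hardware support. */
	static int sketch_check_branch_stack(struct perf_event *event)
	{
		u64 type = event->attr.branch_sample_type;

		if ((type & PERF_SAMPLE_BRANCH_CALL_STACK) &&
		    !x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;

		return 0;
	}
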
@@ -754,6 +771,8 @@ void intel_pmu_lbr_init_atom(void);
 
 void intel_pmu_lbr_init_snb(void);
 
+void intel_pmu_lbr_init_hsw(void);
+
 int intel_pmu_setup_lbr_filter(struct perf_event *event);
 
 int p4_pmu_init(void);
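
Note that PERF_SAMPLE_BRANCH_CALL_STACK is defined past
PERF_SAMPLE_BRANCH_MAX_SHIFT, so at this point it is kernel-internal and not
yet visible to user space. Once the bit is exported through the uapi header
(a separate step, not part of this patch), a perf_event_open() caller could
request call-stack LBR sampling roughly like this; the cycles event and
sample period are arbitrary choices:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Sketch: assumes PERF_SAMPLE_BRANCH_CALL_STACK is in the uapi header. */
	static int open_callstack_event(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
					  PERF_SAMPLE_BRANCH_CALL_STACK;

		/* Measure the calling thread on any CPU. */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}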