perf/x86/intel: Allocate space for storing LBR stack
[firefly-linux-kernel-4.4.55.git] arch/x86/kernel/cpu/perf_event.h
index 4e6cdb0ddc70c86472649dc6e1beee3af93ddcae..69c26b396cf43fa5ec47f82b4867a0f8893e8955 100644
@@ -71,6 +71,8 @@ struct event_constraint {
 #define PERF_X86_EVENT_COMMITTED       0x8 /* event passed commit_txn */
 #define PERF_X86_EVENT_PEBS_LD_HSW     0x10 /* haswell style datala, load */
 #define PERF_X86_EVENT_PEBS_NA_HSW     0x20 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_RDPMC_ALLOWED   0x40 /* grant rdpmc permission */
+
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */
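
The new bit extends the existing PERF_X86_EVENT_* flag space (0x8, 0x10, 0x20, now 0x40). A hedged sketch of a caller testing it; the function name is illustrative and not part of this diff, only the flag comes from the hunk above:

static int sketch_event_idx(struct perf_event *event)
{
        /* Only expose a usable rdpmc index for events granted permission. */
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return 0;

        return event->hw.idx + 1;
}
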
@@ -470,7 +472,8 @@ struct x86_pmu {
        void            (*cpu_dead)(int cpu);
 
        void            (*check_microcode)(void);
-       void            (*flush_branch_stack)(void);
+       void            (*sched_task)(struct perf_event_context *ctx,
+                                     bool sched_in);
 
        /*
         * Intel Arch Perfmon v2+
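
Replacing the LBR-specific flush_branch_stack hook with a generic sched_task callback implies a thin forwarding layer in the common x86 code; a minimal sketch, assuming the perf core invokes the pmu's sched_task method on each context switch:

static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        /* Forward the context-switch notification to the vendor hook, if set. */
        if (x86_pmu.sched_task)
                x86_pmu.sched_task(ctx, sched_in);
}
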
@@ -513,6 +516,21 @@ struct x86_pmu {
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
 };
 
+struct x86_perf_task_context {
+       u64 lbr_from[MAX_LBR_ENTRIES];
+       u64 lbr_to[MAX_LBR_ENTRIES];
+       int lbr_callstack_users;
+       int lbr_stack_state;
+};
+
+enum {
+       PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = PERF_SAMPLE_BRANCH_MAX_SHIFT,
+       PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE,
+
+       PERF_SAMPLE_BRANCH_CALL_STACK =
+                               1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+};
+
 #define x86_add_quirk(func_)                                           \
 do {                                                                   \
        static struct x86_pmu_quirk __quirk __initdata = {              \
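
struct x86_perf_task_context is the storage the commit title refers to; the companion perf_event.c change (not visible in this header diff) is expected to report its size to the perf core so a per-task copy is allocated, roughly along these lines:

static struct pmu pmu = {
        /* ... existing methods ... */
        .sched_task             = x86_pmu_sched_task,
        /* Let the core allocate per-task space for the LBR from/to arrays. */
        .task_ctx_size          = sizeof(struct x86_perf_task_context),
};
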
@@ -544,6 +562,12 @@ static struct perf_pmu_events_attr event_attr_##v = {                      \
 
 extern struct x86_pmu x86_pmu __read_mostly;
 
+static inline bool x86_pmu_has_lbr_callstack(void)
+{
+       return  x86_pmu.lbr_sel_map &&
+               x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
+}
+
 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 int x86_perf_event_set_period(struct perf_event *event);
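
x86_pmu_has_lbr_callstack() reports whether the active PMU's LBR select map carries an entry for call-stack mode. A hedged example of a config-time check that could use it; the function name and call site are illustrative, not taken from this patch:

static int sketch_validate_branch_stack(struct perf_event *event)
{
        /* Refuse call-stack sampling when the hardware map has no entry. */
        if ((event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) &&
            !x86_pmu_has_lbr_callstack())
                return -EOPNOTSUPP;

        return 0;
}
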
@@ -725,6 +749,8 @@ void intel_pmu_pebs_disable_all(void);
 
 void intel_ds_init(void);
 
+void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
+
 void intel_pmu_lbr_reset(void);
 
 void intel_pmu_lbr_enable(struct perf_event *event);
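
intel_pmu_lbr_sched_task() is where the buffers declared in struct x86_perf_task_context get filled and drained. A minimal sketch, assuming the per-task data is reachable through ctx->task_ctx_data and that save/restore helpers are provided elsewhere in the series:

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        struct x86_perf_task_context *task_ctx;

        if (!x86_pmu.lbr_nr)
                return;

        /* Per-task LBR state, if the core allocated it for this context. */
        task_ctx = ctx ? ctx->task_ctx_data : NULL;
        if (!task_ctx)
                return;

        if (sched_in)
                __intel_pmu_lbr_restore(task_ctx);      /* assumed helper */
        else
                __intel_pmu_lbr_save(task_ctx);         /* assumed helper */
}
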
@@ -745,6 +771,8 @@ void intel_pmu_lbr_init_atom(void);
 
 void intel_pmu_lbr_init_snb(void);
 
+void intel_pmu_lbr_init_hsw(void);
+
 int intel_pmu_setup_lbr_filter(struct perf_event *event);
 
 int p4_pmu_init(void);
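
intel_pmu_lbr_init_hsw() follows the pattern of the existing _core/_nhm/_atom/_snb initializers. A sketch of what it plausibly sets; the entry count, MSR constants and hsw_lbr_sel_map table are assumptions based on those initializers and are not shown in this diff:

void intel_pmu_lbr_init_hsw(void)
{
        /* Describe Haswell's LBR facility to the generic LBR code. */
        x86_pmu.lbr_nr       = 16;
        x86_pmu.lbr_tos      = MSR_LBR_TOS;
        x86_pmu.lbr_from     = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to       = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;         /* assumed table */
}
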