Merge remote-tracking branches 'asoc/topic/sh', 'asoc/topic/sigmadsp', 'asoc/topic...
[firefly-linux-kernel-4.4.55.git] / arch / x86 / kernel / cpu / perf_event_intel_uncore_snbep.c
1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "perf_event_intel_uncore.h"
3
4
/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)	/* reset box control */
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)	/* reset box counters */
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)	/* freeze counting (see *_disable_box) */
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)	/* enable the freeze mechanism */
/* value written by the *_init_box callbacks to bring a box to a known state */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control: layout of a per-counter event select register */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff	/* event code, bits 0-7 */
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00	/* unit mask, bits 8-15 */
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)	/* extended event code bit */
#define SNBEP_PMON_CTL_EN		(1 << 22)	/* counter enable */
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000	/* threshold, bits 24-31 */
/* bits a user may set via perf_event_attr::config on a generic SNB-EP box */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control (threshold is only 5 bits wide here) */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* the Cbo adds a tid-enable bit on top of the generic event mask */
#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000	/* occupancy select, bits 14-15 */
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000	/* threshold, bits 24-28 */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_EV_SEL_EXT | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* QPI events additionally use the extended event code bit */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
/* SNB-EP pci control register (offsets in PCI config space) */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register (MSR addresses) */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/*
 * SNB-EP Cbo register: MSRs of Cbo 0; box n lives at
 * base + n * SNBEP_CBO_MSR_OFFSET (see snbep_cbox_hw_config()).
 */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

/* fields of the Cbo filter register (see snbep_cbox_filter_mask()) */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/*
 * Build one extra_reg table entry: event encoding (e) matched under
 * mask (m) maps to filter-field index bits (i) on the Cbo filter MSR.
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
113
/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
/* IvyTown raw mask: like SNB-EP's, but without the invert bit */
#define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter register fields (64-bit layout, wider than SNB-EP's) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* extract the i-th n-bit-wide field from x */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
165
/* Haswell-EP Ubox (MSR addresses) */
#define HSWEP_U_MSR_PMON_CTR0			0x705
#define HSWEP_U_MSR_PMON_CTL0			0x709
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

/* Ubox filter fields: TID (bit 0) and CID (bits 1-5) */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo (MSRs of CBo 0; other boxes at +n*HSWEP_CBO_MSR_OFFSET) */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


/* Haswell-EP CBo filter register fields (64-bit layout) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
212
/*
 * sysfs "format" attribute definitions: each maps a user-visible field
 * name to a bit range inside perf_event_attr config/config1/config2.
 * The per-box attribute tables below pick the subset each PMU exposes.
 * (DEFINE_UNCORE_FORMAT_ATTR itself lives in perf_event_intel_uncore.h.)
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");	/* event code + ext bit */
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
/*
 * NOTE(review): "14-51" looks odd for a single occupancy-edge bit
 * (SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET is bit 31) -- confirm against
 * the uncore PMON guide before changing; left as-is here.
 */
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
262
263 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
264 {
265         struct pci_dev *pdev = box->pci_dev;
266         int box_ctl = uncore_pci_box_ctl(box);
267         u32 config = 0;
268
269         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
270                 config |= SNBEP_PMON_BOX_CTL_FRZ;
271                 pci_write_config_dword(pdev, box_ctl, config);
272         }
273 }
274
275 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
276 {
277         struct pci_dev *pdev = box->pci_dev;
278         int box_ctl = uncore_pci_box_ctl(box);
279         u32 config = 0;
280
281         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
282                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
283                 pci_write_config_dword(pdev, box_ctl, config);
284         }
285 }
286
287 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
288 {
289         struct pci_dev *pdev = box->pci_dev;
290         struct hw_perf_event *hwc = &event->hw;
291
292         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
293 }
294
295 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
296 {
297         struct pci_dev *pdev = box->pci_dev;
298         struct hw_perf_event *hwc = &event->hw;
299
300         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
301 }
302
303 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
304 {
305         struct pci_dev *pdev = box->pci_dev;
306         struct hw_perf_event *hwc = &event->hw;
307         u64 count = 0;
308
309         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
310         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
311
312         return count;
313 }
314
315 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
316 {
317         struct pci_dev *pdev = box->pci_dev;
318
319         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
320 }
321
322 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
323 {
324         u64 config;
325         unsigned msr;
326
327         msr = uncore_msr_box_ctl(box);
328         if (msr) {
329                 rdmsrl(msr, config);
330                 config |= SNBEP_PMON_BOX_CTL_FRZ;
331                 wrmsrl(msr, config);
332         }
333 }
334
335 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
336 {
337         u64 config;
338         unsigned msr;
339
340         msr = uncore_msr_box_ctl(box);
341         if (msr) {
342                 rdmsrl(msr, config);
343                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
344                 wrmsrl(msr, config);
345         }
346 }
347
348 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
349 {
350         struct hw_perf_event *hwc = &event->hw;
351         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
352
353         if (reg1->idx != EXTRA_REG_NONE)
354                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
355
356         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
357 }
358
359 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
360                                         struct perf_event *event)
361 {
362         struct hw_perf_event *hwc = &event->hw;
363
364         wrmsrl(hwc->config_base, hwc->config);
365 }
366
367 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
368 {
369         unsigned msr = uncore_msr_box_ctl(box);
370
371         if (msr)
372                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
373 }
374
/* format fields exposed by generic SNB-EP boxes */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox: same fields, but with the narrower 5-bit threshold */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbo: adds tid_en plus the filter-register fields */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

/* PCU: occupancy fields and the four band filters */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI: extended event code plus packet match/mask fields */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
448
/* named event aliases for the memory controller PMU */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	{ /* end: all zeroes */ },
};

/*
 * Named event aliases for the QPI PMU.  Event codes > 0xff rely on the
 * extended event select bit (format "config:0-7,21" -- event_ext).
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
463
/* sysfs "format" groups wrapping the per-box attribute tables above */
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
488
/*
 * Common callback set for SNB-EP MSR-based boxes.  The two-level macro
 * lets a box type reuse these callbacks while supplying its own
 * ->init_box (use the __ variant and add init_box separately).
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
503
504 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
505         .init_box       = snbep_uncore_pci_init_box,            \
506         .disable_box    = snbep_uncore_pci_disable_box,         \
507         .enable_box     = snbep_uncore_pci_enable_box,          \
508         .disable_event  = snbep_uncore_pci_disable_event,       \
509         .read_counter   = snbep_uncore_pci_read_counter
510
511 static struct intel_uncore_ops snbep_uncore_pci_ops = {
512         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
513         .enable_event   = snbep_uncore_pci_enable_event,        \
514 };
515
/*
 * Counter constraints for the SNB-EP Cbo.
 * NOTE(review): the macro arguments appear to be (event code,
 * allowed-counter bitmask); UNCORE_EVENT_CONSTRAINT and
 * EVENT_CONSTRAINT_OVERLAP are defined outside this file -- confirm
 * their exact semantics there before editing these tables.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

/* Counter constraints for the SNB-EP R2PCIe box */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

/* Counter constraints for the SNB-EP R3QPI box */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
591
/* SNB-EP Ubox PMU: one box, 2 generic 44-bit counters + a 48-bit fixed counter */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
606
/*
 * Map Cbo event encodings to the filter-register fields they need;
 * snbep_cbox_hw_config() ORs together the .idx bits of every matching
 * entry to decide which filter fields an event allocates.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
635
636 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
637 {
638         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
639         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
640         int i;
641
642         if (uncore_box_is_fake(box))
643                 return;
644
645         for (i = 0; i < 5; i++) {
646                 if (reg1->alloc & (0x1 << i))
647                         atomic_sub(1 << (i * 6), &er->ref);
648         }
649         reg1->alloc = 0;
650 }
651
/*
 * Try to allocate the shared Cbo filter-register fields this event needs.
 *
 * reg1->idx is a bitmap of the (up to five) filter fields the event
 * uses; er->ref packs one 6-bit reference count per field (extracted
 * with __BITS_VALUE).  Under er->lock, each requested field is grabbed
 * when it is either unused or already programmed with an identical
 * value.  Returns NULL (no extra constraint) on success, recording the
 * allocation on real boxes; if any field is held with a conflicting
 * value, every reference taken in this call is rolled back and the
 * empty constraint is returned so the event cannot be scheduled now.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* on a real box, skip fields this event already holds */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* take the field if it is unreferenced or configured identically */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;	/* conflict: bail and roll back below */
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* release only the references taken during this call */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
698
699 static u64 snbep_cbox_filter_mask(int fields)
700 {
701         u64 mask = 0;
702
703         if (fields & 0x1)
704                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
705         if (fields & 0x2)
706                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
707         if (fields & 0x4)
708                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
709         if (fields & 0x8)
710                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
711
712         return mask;
713 }
714
/*
 * Claim the shared C-box filter register for @event using the SNB-EP
 * filter field layout (see snbep_cbox_filter_mask()).
 */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
720
/*
 * Work out which C-box filter fields @event needs, based on the
 * extra-register table, and record the per-box filter MSR address and
 * the requested filter value (from attr.config1) in the event's
 * extra_reg.  Events that need no filter are left untouched.
 * Always returns 0.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* accumulate the filter-field bits of every matching table entry */
	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* each C-box has its own filter MSR, spaced by the box offset */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
741
/* SNB-EP C-box callbacks: common MSR ops plus shared-filter handling */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SNB-EP C-box (LLC slice) uncore PMU.  num_boxes is clamped to the
 * actual core count in snbep_uncore_cpu_init().  One shared register
 * holds the box filter state.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
764
765 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
766 {
767         struct hw_perf_event *hwc = &event->hw;
768         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
769         u64 config = reg1->config;
770
771         if (new_idx > reg1->idx)
772                 config <<= 8 * (new_idx - reg1->idx);
773         else
774                 config >>= 8 * (reg1->idx - new_idx);
775
776         if (modify) {
777                 hwc->config += new_idx - reg1->idx;
778                 reg1->config = config;
779                 reg1->idx = new_idx;
780         }
781         return config;
782 }
783
/*
 * Claim an 8-bit band-filter field in the PCU's shared filter register
 * for @event.  If the requested field is taken with a different value,
 * try the other three fields in turn, shifting the event's filter value
 * via snbep_pcu_alter_er().  Returns NULL on success, or the empty
 * constraint if no compatible field is free.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* no filter needed, or (on a real box) already allocated */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* field is free, or already holds an identical filter value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next of the four band-filter fields */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the alternate field choice to the event */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
825
826 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
827 {
828         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
829         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
830
831         if (uncore_box_is_fake(box) || !reg1->alloc)
832                 return;
833
834         atomic_sub(1 << (reg1->idx * 8), &er->ref);
835         reg1->alloc = 0;
836 }
837
838 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
839 {
840         struct hw_perf_event *hwc = &event->hw;
841         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
842         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
843
844         if (ev_sel >= 0xb && ev_sel <= 0xe) {
845                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
846                 reg1->idx = ev_sel - 0xb;
847                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
848         }
849         return 0;
850 }
851
/* SNB-EP PCU callbacks: common MSR ops plus band-filter management */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNB-EP power control unit PMU; one shared register for the filter MSR */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all SNB-EP MSR-based uncore PMUs */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
879
880 void snbep_uncore_cpu_init(void)
881 {
882         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
883                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
884         uncore_msr_uncores = snbep_msr_uncores;
885 }
886
/* indices into uncore_extra_pci_dev[phys_id][] for the QPI filter devices */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};
891
892 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
893 {
894         struct hw_perf_event *hwc = &event->hw;
895         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
896         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
897
898         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
899                 reg1->idx = 0;
900                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
901                 reg1->config = event->attr.config1;
902                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
903                 reg2->config = event->attr.config2;
904         }
905         return 0;
906 }
907
/*
 * Enable a QPI event.  If the event uses the packet match/mask filter,
 * program the filter registers first; they live in a separate PCI
 * device per QPI port, looked up through uncore_extra_pci_dev[] (filled
 * in when the "QPI Port N filter" devices were probed).  Each 64-bit
 * match/mask value is written as two 32-bit config-space accesses.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects port 0 or port 1 filter device */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
932
/* QPI callbacks: common PCI ops plus match/mask filter handling */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* register layout shared by all SNB-EP PCI-based uncore boxes */
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,                  \
	.event_ctl	= SNBEP_PCI_PMON_CTL0,                  \
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,            \
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,               \
	.ops		= &snbep_uncore_pci_ops,                \
	.format_group	= &snbep_uncore_format_group
948
/* SNB-EP home agent PMU */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP integrated memory controller PMU, one box per channel */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP QPI link-layer PMU, one box per port; uses the QPI filter ops */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* SNB-EP R2PCIe (ring-to-PCIe) PMU */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP R3QPI (ring-to-QPI) PMU, one box per link */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1002
/* logical indices of the SNB-EP PCI uncore types in snbep_pci_uncores[] */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all SNB-EP PCI-based uncore PMUs */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1019
/*
 * PCI IDs of the SNB-EP uncore devices; driver_data encodes the uncore
 * type index and the box (or extra-device) index for each one.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1073
/* PCI driver shell handed to the generic uncore code; no probe of its own */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1078
1079 /*
1080  * build pci bus to socket mapping
1081  */
1082 static int snbep_pci2phy_map_init(int devid)
1083 {
1084         struct pci_dev *ubox_dev = NULL;
1085         int i, bus, nodeid;
1086         int err = 0;
1087         u32 config = 0;
1088
1089         while (1) {
1090                 /* find the UBOX device */
1091                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1092                 if (!ubox_dev)
1093                         break;
1094                 bus = ubox_dev->bus->number;
1095                 /* get the Node ID of the local register */
1096                 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1097                 if (err)
1098                         break;
1099                 nodeid = config;
1100                 /* get the Node ID mapping */
1101                 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1102                 if (err)
1103                         break;
1104                 /*
1105                  * every three bits in the Node ID mapping register maps
1106                  * to a particular node.
1107                  */
1108                 for (i = 0; i < 8; i++) {
1109                         if (nodeid == ((config >> (3 * i)) & 0x7)) {
1110                                 uncore_pcibus_to_physid[bus] = i;
1111                                 break;
1112                         }
1113                 }
1114         }
1115
1116         if (!err) {
1117                 /*
1118                  * For PCI bus with no UBOX device, find the next bus
1119                  * that has UBOX device and use its mapping.
1120                  */
1121                 i = -1;
1122                 for (bus = 255; bus >= 0; bus--) {
1123                         if (uncore_pcibus_to_physid[bus] >= 0)
1124                                 i = uncore_pcibus_to_physid[bus];
1125                         else
1126                                 uncore_pcibus_to_physid[bus] = i;
1127                 }
1128         }
1129
1130         if (ubox_dev)
1131                 pci_dev_put(ubox_dev);
1132
1133         return err ? pcibios_err_to_errno(err) : 0;
1134 }
1135
1136 int snbep_uncore_pci_init(void)
1137 {
1138         int ret = snbep_pci2phy_map_init(0x3ce0);
1139         if (ret)
1140                 return ret;
1141         uncore_pci_uncores = snbep_pci_uncores;
1142         uncore_pci_driver = &snbep_uncore_pci_driver;
1143         return 0;
1144 }
1145 /* end of Sandy Bridge-EP uncore support */
1146
1147 /* IvyTown uncore support */
/* Reset an MSR-based IVT box to its initial (frozen-enable) state. */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

/* Reset a PCI-based IVT box via its config-space box control register. */
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
1161
/*
 * IVT MSR boxes reuse the SNB-EP enable/disable/read callbacks; only
 * box initialization differs (IVT-specific control value).
 */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
	.init_box	= ivbep_uncore_msr_init_box,            \
	.disable_box	= snbep_uncore_msr_disable_box,         \
	.enable_box	= snbep_uncore_msr_enable_box,          \
	.disable_event	= snbep_uncore_msr_disable_event,       \
	.enable_event	= snbep_uncore_msr_enable_event,        \
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IVT PCI boxes likewise share the SNB-EP callbacks except init_box */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* register layout shared by all IVT PCI-based uncore boxes */
#define IVBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,                  \
	.event_ctl	= SNBEP_PCI_PMON_CTL0,                  \
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,            \
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,               \
	.ops		= &ivbep_uncore_pci_ops,                \
	.format_group	= &ivbep_uncore_format_group
1190
/* generic IVT PMON event format: event/umask/edge/inv/8-bit threshold */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* U-box format: only a 5-bit threshold field */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-box format: adds the tid/link/state/nid/opc/nc/c6/isoc filter fields */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge plus the four band filters */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: packet match/mask sub-fields and the raw match/mask words */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1265
/* sysfs "format" groups exposing the attribute lists above, per box type */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1290
/* IVT U-box PMU: two general counters plus the fixed UCLK counter */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1305
1306 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1307         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1308                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1309         SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1310         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1311         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1312         SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1313         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1314         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1315         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1316         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1317         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1318         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1319         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1320         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1321         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1322         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1323         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1324         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1325         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1326         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1327         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1328         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1329         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1330         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1331         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1332         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1333         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1334         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1335         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1336         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1337         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1338         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1339         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1340         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1341         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1342         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1343         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1344         EVENT_EXTRA_END
1345 };
1346
1347 static u64 ivbep_cbox_filter_mask(int fields)
1348 {
1349         u64 mask = 0;
1350
1351         if (fields & 0x1)
1352                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1353         if (fields & 0x2)
1354                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1355         if (fields & 0x4)
1356                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1357         if (fields & 0x8)
1358                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1359         if (fields & 0x10) {
1360                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1361                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1362                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1363                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1364         }
1365
1366         return mask;
1367 }
1368
/*
 * Claim the shared C-box filter register for @event using the IVT
 * filter field layout (see ivbep_cbox_filter_mask()).
 */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1374
/*
 * IVT counterpart of snbep_cbox_hw_config(): collect the filter fields
 * @event needs from the IVT extra-register table and record the per-box
 * filter MSR address and value in the event's extra_reg.
 * Always returns 0.
 */
static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* accumulate the filter-field bits of every matching table entry */
	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* each C-box has its own filter MSR, spaced by the box offset */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1395
/*
 * Program the shared C-box filter before enabling the event.  The IVT
 * filter value is 64 bits wide, split across two MSRs: the low 32 bits
 * go to the filter register itself, the high 32 bits to a second
 * register at reg + 6.
 * NOTE(review): the +6 offset looks deliberate (second filter register
 * in the IVT C-box MSR layout, not reg + 1) — confirm against the IVT
 * uncore PMON register map before changing.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1409
/*
 * IVT C-box callbacks: shares the SNB-EP constraint release path but
 * needs its own enable_event (split 64-bit filter write) and hw_config.
 */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * IVT C-box (LLC slice) uncore PMU; num_boxes is clamped to the core
 * count in ivbep_uncore_cpu_init().
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1437
/* IVT PCU callbacks: reuses the SNB-EP band-filter management wholesale */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT power control unit PMU; one shared register for the filter MSR */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all IVT MSR-based uncore PMUs */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1465
1466 void ivbep_uncore_cpu_init(void)
1467 {
1468         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1469                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1470         uncore_msr_uncores = ivbep_msr_uncores;
1471 }
1472
/* IVB-EP home agent PMU (PCI config-space accessed). */
static struct intel_uncore_type ivbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1480
/* IVB-EP memory controller PMU: 8 channels, plus a fixed (DCLK) counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1492
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter offsets must come from these lookup tables.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1496
1497 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1498 {
1499         struct pci_dev *pdev = box->pci_dev;
1500         struct hw_perf_event *hwc = &event->hw;
1501
1502         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1503                                hwc->config | SNBEP_PMON_CTL_EN);
1504 }
1505
1506 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1507 {
1508         struct pci_dev *pdev = box->pci_dev;
1509         struct hw_perf_event *hwc = &event->hw;
1510
1511         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1512 }
1513
1514 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1515 {
1516         struct pci_dev *pdev = box->pci_dev;
1517         struct hw_perf_event *hwc = &event->hw;
1518         u64 count = 0;
1519
1520         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1521         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1522
1523         return count;
1524 }
1525
/* IVB-EP IRP PMU ops: table-driven event access, generic SNB-EP box ops. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
        .read_counter   = ivbep_uncore_irp_read_counter,
};
1534
/*
 * IVB-EP IRP PMU. Note: no .perf_ctr/.event_ctl here -- register offsets
 * come from the ivbep_uncore_irp_ctls/ctrs tables via the ops above.
 */
static struct intel_uncore_type ivbep_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = IVBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &ivbep_uncore_irp_ops,
        .format_group           = &ivbep_uncore_format_group,
};
1545
/*
 * IVB-EP QPI PMU ops: QPI match/mask filters are managed through the
 * generic shared-register constraint helpers.
 */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_qpi_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
        .hw_config      = snbep_qpi_hw_config,
        .get_constraint = uncore_get_constraint,
        .put_constraint = uncore_put_constraint,
};
1557
/* IVB-EP QPI link-layer PMU: up to 3 ports. */
static struct intel_uncore_type ivbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,    /* match/mask filter registers */
        .ops                    = &ivbep_uncore_qpi_ops,
        .format_group           = &ivbep_uncore_qpi_format_group,
};
1571
/* IVB-EP ring-to-PCIe PMU; reuses the SNB-EP counter constraints. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVB-EP ring-to-QPI PMU; reuses the SNB-EP counter constraints. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1589
/* Indices into ivbep_pci_uncores[]; must match the driver_data below. */
enum {
        IVBEP_PCI_UNCORE_HA,
        IVBEP_PCI_UNCORE_IMC,
        IVBEP_PCI_UNCORE_IRP,
        IVBEP_PCI_UNCORE_QPI,
        IVBEP_PCI_UNCORE_R2PCIE,
        IVBEP_PCI_UNCORE_R3QPI,
};

/* PCI-accessed uncore PMUs on IVB-EP; NULL-terminated. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
        [IVBEP_PCI_UNCORE_HA]   = &ivbep_uncore_ha,
        [IVBEP_PCI_UNCORE_IMC]  = &ivbep_uncore_imc,
        [IVBEP_PCI_UNCORE_IRP]  = &ivbep_uncore_irp,
        [IVBEP_PCI_UNCORE_QPI]  = &ivbep_uncore_qpi,
        [IVBEP_PCI_UNCORE_R2PCIE]       = &ivbep_uncore_r2pcie,
        [IVBEP_PCI_UNCORE_R3QPI]        = &ivbep_uncore_r3qpi,
        NULL,
};
1608
/*
 * PCI device IDs of the IVB-EP uncore PMU units. driver_data packs the
 * uncore type index and box index (UNCORE_PCI_DEV_DATA).
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};
1694
/* Only the id table is supplied; probing is done by the common uncore code. */
static struct pci_driver ivbep_uncore_pci_driver = {
        .name           = "ivbep_uncore",
        .id_table       = ivbep_uncore_pci_ids,
};
1699
1700 int ivbep_uncore_pci_init(void)
1701 {
1702         int ret = snbep_pci2phy_map_init(0x0e1e);
1703         if (ret)
1704                 return ret;
1705         uncore_pci_uncores = ivbep_pci_uncores;
1706         uncore_pci_driver = &ivbep_uncore_pci_driver;
1707         return 0;
1708 }
1709 /* end of IvyTown uncore support */
1710
1711 /* Haswell-EP uncore support */
/* sysfs "format" attributes describing the HSW-EP Ubox raw event layout. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_filter_tid2.attr,
        &format_attr_filter_cid.attr,
        NULL,
};

static struct attribute_group hswep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = hswep_uncore_ubox_formats_attr,
};
1727
1728 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1729 {
1730         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1731         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
1732         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
1733         reg1->idx = 0;
1734         return 0;
1735 }
1736
/* HSW-EP Ubox PMU ops: SNB-EP common MSR ops plus the Ubox filter. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = hswep_ubox_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};
1743
/* HSW-EP Ubox PMU: two 44-bit counters plus a fixed 48-bit UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 44,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
        .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .num_shared_regs        = 1,    /* the Ubox filter register */
        .ops                    = &hswep_uncore_ubox_ops,
        .format_group           = &hswep_uncore_ubox_format_group,
};
1759
/* sysfs "format" attributes for the HSW-EP Cbox, including filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid3.attr,
        &format_attr_filter_link2.attr,
        &format_attr_filter_state3.attr,
        &format_attr_filter_nid2.attr,
        &format_attr_filter_opc2.attr,
        &format_attr_filter_nc.attr,
        &format_attr_filter_c6.attr,
        &format_attr_filter_isoc.attr,
        NULL,
};

static struct attribute_group hswep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = hswep_uncore_cbox_formats_attr,
};
1781
/* HSW-EP Cbox events restricted to specific counters (event, counter mask). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
        EVENT_CONSTRAINT_END
};
1792
/*
 * HSW-EP Cbox events that require the box filter registers. Each entry's
 * idx is a bitmask of needed filter fields; hswep_cbox_hw_config() ORs
 * the idx of every matching entry and hswep_cbox_filter_mask() converts
 * that into the valid filter bits.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
        EVENT_EXTRA_END
};
1834
1835 static u64 hswep_cbox_filter_mask(int fields)
1836 {
1837         u64 mask = 0;
1838         if (fields & 0x1)
1839                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
1840         if (fields & 0x2)
1841                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1842         if (fields & 0x4)
1843                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1844         if (fields & 0x8)
1845                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
1846         if (fields & 0x10) {
1847                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1848                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
1849                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
1850                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1851         }
1852         return mask;
1853 }
1854
/* Thin wrapper binding the generic Cbox constraint logic to the HSW mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
1860
1861 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1862 {
1863         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1864         struct extra_reg *er;
1865         int idx = 0;
1866
1867         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
1868                 if (er->event != (event->hw.config & er->config_mask))
1869                         continue;
1870                 idx |= er->idx;
1871         }
1872
1873         if (idx) {
1874                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1875                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1876                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
1877                 reg1->idx = idx;
1878         }
1879         return 0;
1880 }
1881
1882 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1883                                   struct perf_event *event)
1884 {
1885         struct hw_perf_event *hwc = &event->hw;
1886         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1887
1888         if (reg1->idx != EXTRA_REG_NONE) {
1889                 u64 filter = uncore_shared_reg_config(box, 0);
1890                 wrmsrl(reg1->reg, filter & 0xffffffff);
1891                 wrmsrl(reg1->reg + 1, filter >> 32);
1892         }
1893
1894         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1895 }
1896
/* HSW-EP Cbox PMU ops: SNB-EP box handling plus HSW filter management. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
        .init_box               = snbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = hswep_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = hswep_cbox_hw_config,
        .get_constraint         = hswep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};
1908
/* HSW-EP Cbox PMU: up to 18 boxes (clamped to core count at init). */
static struct intel_uncore_type hswep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 18,
        .perf_ctr_bits          = 44,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,    /* the per-box filter pair */
        .constraints            = hswep_uncore_cbox_constraints,
        .ops                    = &hswep_uncore_cbox_ops,
        .format_group           = &hswep_uncore_cbox_format_group,
};
1924
1925 /*
1926  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
1927  */
1928 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
1929 {
1930         unsigned msr = uncore_msr_box_ctl(box);
1931
1932         if (msr) {
1933                 u64 init = SNBEP_PMON_BOX_CTL_INT;
1934                 u64 flags = 0;
1935                 int i;
1936
1937                 for_each_set_bit(i, (unsigned long *)&init, 64) {
1938                         flags |= (1ULL << i);
1939                         wrmsrl(msr, flags);
1940                 }
1941         }
1942 }
1943
/* SBOX ops: common MSR ops but with the bit-by-bit init_box above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .init_box               = hswep_uncore_sbox_msr_init_box
};
1948
/* sysfs "format" attributes for the HSW-EP SBOX raw event layout. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute_group hswep_uncore_sbox_format_group = {
        .name = "format",
        .attrs = hswep_uncore_sbox_formats_attr,
};
1963
/* HSW-EP SBOX (ring-to-QPI buffer) PMU: 4 boxes. */
static struct intel_uncore_type hswep_uncore_sbox = {
        .name                   = "sbox",
        .num_counters           = 4,
        .num_boxes              = 4,
        .perf_ctr_bits          = 44,
        .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
        .ops                    = &hswep_uncore_sbox_msr_ops,
        .format_group           = &hswep_uncore_sbox_format_group,
};
1977
/* Set up the HSW-EP PCU filter register as an extra reg for events 0xb-0xe. */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                /*
                 * NOTE(review): the mask here is (0xff << idx), i.e. shifted
                 * by at most 3 bits, whereas the SNB-EP variant shifts by
                 * whole bytes (0xff << (idx * 8)). If the HSW-EP PCU filter
                 * also uses one byte per band, this under-shifts -- confirm
                 * against the HSW-EP uncore PMU reference before changing.
                 */
                reg1->config = event->attr.config1 & (0xff << reg1->idx);
        }
        return 0;
}
1991
/* HSW-EP PCU PMU ops: common MSR ops plus PCU filter/constraint handling. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = hswep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};
1998
/* HSW-EP power control unit PMU: one box, four 48-bit counters. */
static struct intel_uncore_type hswep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,    /* the PCU filter register */
        .ops                    = &hswep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};
2012
/* MSR-accessed uncore PMUs on HSW-EP; NULL-terminated. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
        &hswep_uncore_ubox,
        &hswep_uncore_cbox,
        &hswep_uncore_sbox,
        &hswep_uncore_pcu,
        NULL,
};
2020
2021 void hswep_uncore_cpu_init(void)
2022 {
2023         if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2024                 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2025         uncore_msr_uncores = hswep_msr_uncores;
2026 }
2027
/* HSW-EP home agent PMU (PCI-based); 5 counters per box vs 4 on IVB-EP. */
static struct intel_uncore_type hswep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 5,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2035
/* Named IMC events exposed through sysfs (clockticks and CAS counts). */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        { /* end: all zeroes */ },
};
2042
/* HSW-EP memory controller PMU: 8 channels, plus a fixed (DCLK) counter. */
static struct intel_uncore_type hswep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 5,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2054
/* Per-counter config-space offsets of the HSW-EP IRP counter registers. */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2056
2057 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2058 {
2059         struct pci_dev *pdev = box->pci_dev;
2060         struct hw_perf_event *hwc = &event->hw;
2061         u64 count = 0;
2062
2063         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2064         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2065
2066         return count;
2067 }
2068
/*
 * HSW-EP IRP PMU ops: reuses the IVB-EP table-driven enable/disable
 * (control offsets are shared) with the HSW-EP counter offset table.
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
        .read_counter   = hswep_uncore_irp_read_counter,
};
2077
/*
 * HSW-EP IRP PMU. No .perf_ctr/.event_ctl -- register offsets come from
 * the lookup tables used by hswep_uncore_irp_ops.
 */
static struct intel_uncore_type hswep_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &hswep_uncore_irp_ops,
        .format_group           = &snbep_uncore_format_group,
};
2088
/* HSW-EP QPI link-layer PMU: up to 3 ports, reusing the SNB-EP QPI ops. */
static struct intel_uncore_type hswep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 5,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,    /* match/mask filter registers */
        .ops                    = &snbep_uncore_qpi_ops,
        .format_group           = &snbep_uncore_qpi_format_group,
};
2102
/* HSW-EP R2PCIe events restricted to specific counters. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        EVENT_CONSTRAINT_END
};

/* HSW-EP ring-to-PCIe PMU. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = hswep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2133
/* HSW-EP R3QPI events restricted to specific counters. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};

/* HSW-EP ring-to-QPI PMU: 3 boxes. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 4,
        .num_boxes      = 3,
        .perf_ctr_bits  = 44,
        .constraints    = hswep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2179
/* Indices into hswep_pci_uncores[]; must match the PCI id table driver_data. */
enum {
        HSWEP_PCI_UNCORE_HA,
        HSWEP_PCI_UNCORE_IMC,
        HSWEP_PCI_UNCORE_IRP,
        HSWEP_PCI_UNCORE_QPI,
        HSWEP_PCI_UNCORE_R2PCIE,
        HSWEP_PCI_UNCORE_R3QPI,
};

/* PCI-accessed uncore PMUs on HSW-EP; NULL-terminated. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
        [HSWEP_PCI_UNCORE_HA]   = &hswep_uncore_ha,
        [HSWEP_PCI_UNCORE_IMC]  = &hswep_uncore_imc,
        [HSWEP_PCI_UNCORE_IRP]  = &hswep_uncore_irp,
        [HSWEP_PCI_UNCORE_QPI]  = &hswep_uncore_qpi,
        [HSWEP_PCI_UNCORE_R2PCIE]       = &hswep_uncore_r2pcie,
        [HSWEP_PCI_UNCORE_R3QPI]        = &hswep_uncore_r3qpi,
        NULL,
};
2198
2199 static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
2200         { /* Home Agent 0 */
2201                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
2202                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
2203         },
2204         { /* Home Agent 1 */
2205                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
2206                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
2207         },
2208         { /* MC0 Channel 0 */
2209                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
2210                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
2211         },
2212         { /* MC0 Channel 1 */
2213                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
2214                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
2215         },
2216         { /* MC0 Channel 2 */
2217                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
2218                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
2219         },
2220         { /* MC0 Channel 3 */
2221                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
2222                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
2223         },
2224         { /* MC1 Channel 0 */
2225                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
2226                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
2227         },
2228         { /* MC1 Channel 1 */
2229                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
2230                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
2231         },
2232         { /* MC1 Channel 2 */
2233                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
2234                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
2235         },
2236         { /* MC1 Channel 3 */
2237                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
2238                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
2239         },
2240         { /* IRP */
2241                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
2242                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
2243         },
2244         { /* QPI0 Port 0 */
2245                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
2246                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
2247         },
2248         { /* QPI0 Port 1 */
2249                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
2250                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
2251         },
2252         { /* QPI1 Port 2 */
2253                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
2254                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
2255         },
2256         { /* R2PCIe */
2257                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
2258                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
2259         },
2260         { /* R3QPI0 Link 0 */
2261                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
2262                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
2263         },
2264         { /* R3QPI0 Link 1 */
2265                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
2266                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
2267         },
2268         { /* R3QPI1 Link 2 */
2269                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
2270                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
2271         },
2272         { /* QPI Port 0 filter  */
2273                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
2274                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2275                                                    SNBEP_PCI_QPI_PORT0_FILTER),
2276         },
2277         { /* QPI Port 1 filter  */
2278                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
2279                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2280                                                    SNBEP_PCI_QPI_PORT1_FILTER),
2281         },
2282         { /* end: all zeroes */ }
2283 };
2284
/*
 * PCI driver handed to the generic uncore code via uncore_pci_driver in
 * hswep_uncore_pci_init().  Only name and id_table are set here;
 * presumably .probe/.remove are filled in by the common uncore
 * registration path -- verify against perf_event_intel_uncore.c.
 */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2289
2290 int hswep_uncore_pci_init(void)
2291 {
2292         int ret = snbep_pci2phy_map_init(0x2f1e);
2293         if (ret)
2294                 return ret;
2295         uncore_pci_uncores = hswep_pci_uncores;
2296         uncore_pci_driver = &hswep_uncore_pci_driver;
2297         return 0;
2298 }
2299 /* end of Haswell-EP uncore support */