1 /*
2 * CCI cache coherent interconnect driver
4 * Copyright (C) 2013 ARM Ltd.
5 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
17 #include <linux/arm-cci.h>
19 #include <linux/interrupt.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/perf_event.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
29 #include <asm/cacheflush.h>
30 #include <asm/smp_plat.h>
32 static void __iomem *cci_ctrl_base;
33 static unsigned long cci_ctrl_phys;
35 #ifdef CONFIG_ARM_CCI400_PORT_CTRL
38 unsigned int nb_ace_lite;
41 static const struct cci_nb_ports cci400_ports = {
46 #define CCI400_PORTS_DATA (&cci400_ports)
48 #define CCI400_PORTS_DATA (NULL)
51 static const struct of_device_id arm_cci_matches[] = {
52 #ifdef CONFIG_ARM_CCI400_COMMON
53 {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
55 #ifdef CONFIG_ARM_CCI500_PMU
56 { .compatible = "arm,cci-500", },
61 #ifdef CONFIG_ARM_CCI_PMU
63 #define DRIVER_NAME "ARM-CCI"
64 #define DRIVER_NAME_PMU DRIVER_NAME " PMU"
66 #define CCI_PMCR 0x0100
67 #define CCI_PID2 0x0fe8
69 #define CCI_PMCR_CEN 0x00000001
70 #define CCI_PMCR_NCNT_MASK 0x0000f800
71 #define CCI_PMCR_NCNT_SHIFT 11
73 #define CCI_PID2_REV_MASK 0xf0
74 #define CCI_PID2_REV_SHIFT 4
76 #define CCI_PMU_EVT_SEL 0x000
77 #define CCI_PMU_CNTR 0x004
78 #define CCI_PMU_CNTR_CTRL 0x008
79 #define CCI_PMU_OVRFLW 0x00c
81 #define CCI_PMU_OVRFLW_FLAG 1
83 #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
84 #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model))
85 #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
86 #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
88 #define CCI_PMU_MAX_HW_CNTRS(model) \
89 ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
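/*
 * Illustrative example (not part of the driver): per-counter registers are
 * laid out as one block of cntr_size bytes per counter, so for a model with
 * cntr_size = SZ_4K, counter 2's control register would live at
 * cci_pmu->base + 2 * SZ_4K + CCI_PMU_CNTR_CTRL. This is exactly what
 * pmu_read_register()/pmu_write_register() below compute via
 * CCI_PMU_CNTR_BASE().
 */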
91 /* Types of interfaces that can generate events */
95 #ifdef CONFIG_ARM_CCI500_PMU
106 struct cci_pmu_hw_events {
107 struct perf_event **events;
108 unsigned long *used_mask;
109 raw_spinlock_t pmu_lock;
114 * struct cci_pmu_model:
115 * @fixed_hw_cntrs - Number of fixed event counters
116 * @num_hw_cntrs - Maximum number of programmable event counters
117 * @cntr_size - Size of an event counter mapping
119 struct cci_pmu_model {
126 struct dev_ext_attribute *format_attrs;
127 struct dev_ext_attribute *event_attrs;
128 struct event_range event_ranges[CCI_IF_MAX];
129 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
130 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
133 static struct cci_pmu_model cci_pmu_models[];
140 unsigned long active_irqs;
141 const struct cci_pmu_model *model;
142 struct cci_pmu_hw_events hw_events;
143 struct platform_device *plat_device;
145 atomic_t active_events;
146 struct mutex reserve_mutex;
147 struct notifier_block cpu_nb;
151 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
154 #ifdef CONFIG_ARM_CCI400_PMU
158 #ifdef CONFIG_ARM_CCI500_PMU
164 static ssize_t cci_pmu_format_show(struct device *dev,
165 struct device_attribute *attr, char *buf);
166 static ssize_t cci_pmu_event_show(struct device *dev,
167 struct device_attribute *attr, char *buf);
169 #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
170 { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }
172 #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
173 CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
174 #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
175 CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
177 /* CCI400 PMU Specific definitions */
179 #ifdef CONFIG_ARM_CCI400_PMU
182 #define CCI400_PORT_S0 0
183 #define CCI400_PORT_S1 1
184 #define CCI400_PORT_S2 2
185 #define CCI400_PORT_S3 3
186 #define CCI400_PORT_S4 4
187 #define CCI400_PORT_M0 5
188 #define CCI400_PORT_M1 6
189 #define CCI400_PORT_M2 7
191 #define CCI400_R1_PX 5
194 * Instead of an event id to monitor CCI cycles, a dedicated counter is
195 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
196 * make use of this event in hardware.
198 enum cci400_perf_events {
199 CCI400_PMU_CYCLES = 0xff
202 #define CCI400_PMU_CYCLE_CNTR_IDX 0
203 #define CCI400_PMU_CNTR0_IDX 1
206 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
207 * ports and bits 4:0 are event codes. There are different event codes
208 * associated with each port type.
210 * Additionally, the range of events associated with the port types changed
211 * between Rev0 and Rev1.
213 * The constants below define the range of valid codes for each port type for
214 * the different revisions and are used to validate the event to be monitored.
217 #define CCI400_PMU_EVENT_MASK 0xffUL
218 #define CCI400_PMU_EVENT_SOURCE_SHIFT 5
219 #define CCI400_PMU_EVENT_SOURCE_MASK 0x7
220 #define CCI400_PMU_EVENT_CODE_SHIFT 0
221 #define CCI400_PMU_EVENT_CODE_MASK 0x1f
222 #define CCI400_PMU_EVENT_SOURCE(event) \
223 ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
224 CCI400_PMU_EVENT_SOURCE_MASK)
225 #define CCI400_PMU_EVENT_CODE(event) \
226 ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
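/*
 * Minimal decode sketch (illustrative only, not used by the driver): shows
 * how a raw CCI-400 event id splits into the port (source) and event code
 * fields described above, using the masks and shifts just defined.
 */
static inline void cci400_event_decode_example(unsigned long hw_event)
{
	u8 source = CCI400_PMU_EVENT_SOURCE(hw_event);	/* bits 7:5: port */
	u8 code = CCI400_PMU_EVENT_CODE(hw_event);	/* bits 4:0: event code */

	pr_debug("CCI-400 event 0x%lx: source 0x%x, code 0x%x\n",
		 hw_event, source, code);
}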
228 #define CCI400_R0_SLAVE_PORT_MIN_EV 0x00
229 #define CCI400_R0_SLAVE_PORT_MAX_EV 0x13
230 #define CCI400_R0_MASTER_PORT_MIN_EV 0x14
231 #define CCI400_R0_MASTER_PORT_MAX_EV 0x1a
233 #define CCI400_R1_SLAVE_PORT_MIN_EV 0x00
234 #define CCI400_R1_SLAVE_PORT_MAX_EV 0x14
235 #define CCI400_R1_MASTER_PORT_MIN_EV 0x00
236 #define CCI400_R1_MASTER_PORT_MAX_EV 0x11
238 #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
239 CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
240 (unsigned long)_config)
242 static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
243 struct device_attribute *attr, char *buf);
245 static struct dev_ext_attribute cci400_pmu_format_attrs[] = {
246 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
247 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
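/*
 * Illustrative usage (assumes the PMU registers under the model name, e.g.
 * "CCI_400"): the "source" and "event" format fields above map directly to
 * the config bits, so a slave-interface event can be counted system-wide
 * with something like:
 *
 *	perf stat -a -e CCI_400/source=0x0,event=0x9/ -- sleep 1
 */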
250 static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = {
252 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
253 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
254 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
255 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
256 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
257 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
258 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
259 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
260 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
261 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
262 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
263 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
264 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
265 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
266 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
267 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
268 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
269 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
270 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
271 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
273 CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
274 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
275 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
276 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
277 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
278 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
279 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
280 /* Special event for cycles counter */
281 CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
284 static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = {
286 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
287 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
288 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
289 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
290 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
291 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
292 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
293 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
294 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
295 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
296 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
297 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
298 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
299 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
300 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
301 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
302 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
303 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
304 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
305 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
306 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
308 CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
309 CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
310 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
311 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
312 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
313 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
314 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
315 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
316 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
317 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
318 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
319 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
320 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
321 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
322 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
323 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
324 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
325 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
326 /* Special event for cycles counter */
327 CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
330 static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
331 struct device_attribute *attr, char *buf)
333 struct dev_ext_attribute *eattr = container_of(attr,
334 struct dev_ext_attribute, attr);
335 return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
338 static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
339 struct cci_pmu_hw_events *hw,
340 unsigned long cci_event)
344 /* cycles event idx is fixed */
345 if (cci_event == CCI400_PMU_CYCLES) {
346 if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
349 return CCI400_PMU_CYCLE_CNTR_IDX;
352 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
353 if (!test_and_set_bit(idx, hw->used_mask))
356 /* No counters available */
360 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
362 u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
363 u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
366 if (hw_event & ~CCI400_PMU_EVENT_MASK)
369 if (hw_event == CCI400_PMU_CYCLES)
378 /* Slave Interface */
379 if_type = CCI_IF_SLAVE;
384 /* Master Interface */
385 if_type = CCI_IF_MASTER;
391 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
392 ev_code <= cci_pmu->model->event_ranges[if_type].max)
398 static int probe_cci400_revision(void)
401 rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
402 rev >>= CCI_PID2_REV_SHIFT;
404 if (rev < CCI400_R1_PX)
410 static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
412 if (platform_has_secure_cci_access())
413 return &cci_pmu_models[probe_cci400_revision()];
416 #else /* !CONFIG_ARM_CCI400_PMU */
417 static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
421 #endif /* CONFIG_ARM_CCI400_PMU */
423 #ifdef CONFIG_ARM_CCI500_PMU
426 * CCI500 provides 8 independent event counters that can count
427 * any of the events available.
429 * CCI500 PMU event id is a 9-bit value made of two parts.
430 * bits [8:5] - Source for the event
431 * 0x0-0x6 - Slave interfaces
432 * 0x8-0xD - Master interfaces
433 * 0xf - Global Events
436 * bits [4:0] - Event code (specific to type of interface)
440 #define CCI500_PORT_S0 0x0
441 #define CCI500_PORT_S1 0x1
442 #define CCI500_PORT_S2 0x2
443 #define CCI500_PORT_S3 0x3
444 #define CCI500_PORT_S4 0x4
445 #define CCI500_PORT_S5 0x5
446 #define CCI500_PORT_S6 0x6
448 #define CCI500_PORT_M0 0x8
449 #define CCI500_PORT_M1 0x9
450 #define CCI500_PORT_M2 0xa
451 #define CCI500_PORT_M3 0xb
452 #define CCI500_PORT_M4 0xc
453 #define CCI500_PORT_M5 0xd
455 #define CCI500_PORT_GLOBAL 0xf
457 #define CCI500_PMU_EVENT_MASK 0x1ffUL
458 #define CCI500_PMU_EVENT_SOURCE_SHIFT 0x5
459 #define CCI500_PMU_EVENT_SOURCE_MASK 0xf
460 #define CCI500_PMU_EVENT_CODE_SHIFT 0x0
461 #define CCI500_PMU_EVENT_CODE_MASK 0x1f
463 #define CCI500_PMU_EVENT_SOURCE(event) \
464 ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
465 #define CCI500_PMU_EVENT_CODE(event) \
466 ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)
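/*
 * Worked example (illustrative): hw_event 0x1e8 splits, per the macros above,
 * into source = (0x1e8 >> 5) & 0xf = 0xf (the global events group) and
 * code = 0x1e8 & 0x1f = 0x8, i.e. the snoop back-invalidation global event
 * listed below.
 */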
468 #define CCI500_SLAVE_PORT_MIN_EV 0x00
469 #define CCI500_SLAVE_PORT_MAX_EV 0x1f
470 #define CCI500_MASTER_PORT_MIN_EV 0x00
471 #define CCI500_MASTER_PORT_MAX_EV 0x06
472 #define CCI500_GLOBAL_PORT_MIN_EV 0x00
473 #define CCI500_GLOBAL_PORT_MAX_EV 0x0f
476 #define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
477 CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \
478 (unsigned long) _config)
480 static ssize_t cci500_pmu_global_event_show(struct device *dev,
481 struct device_attribute *attr, char *buf);
483 static struct dev_ext_attribute cci500_pmu_format_attrs[] = {
484 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
485 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
488 static struct dev_ext_attribute cci500_pmu_event_attrs[] = {
490 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
491 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
492 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
493 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
494 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
495 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
496 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
497 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
498 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
499 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
500 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
501 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
502 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
503 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
504 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
505 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
506 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
507 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
508 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
509 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
510 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
511 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
512 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
513 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
514 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
515 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
516 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
517 CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
518 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
519 CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
520 CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
521 CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
524 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
525 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
526 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
527 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
528 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
529 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
530 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
533 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
534 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
535 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
536 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
537 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
538 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
539 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
540 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
541 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
542 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
543 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
544 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
545 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
546 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
547 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
548 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
551 static ssize_t cci500_pmu_global_event_show(struct device *dev,
552 struct device_attribute *attr, char *buf)
554 struct dev_ext_attribute *eattr = container_of(attr,
555 struct dev_ext_attribute, attr);
556 /* Global events have single fixed source code */
557 return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
558 (unsigned long)eattr->var, CCI500_PORT_GLOBAL);
561 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
562 unsigned long hw_event)
564 u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event);
565 u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event);
568 if (hw_event & ~CCI500_PMU_EVENT_MASK)
579 if_type = CCI_IF_SLAVE;
587 if_type = CCI_IF_MASTER;
589 case CCI500_PORT_GLOBAL:
590 if_type = CCI_IF_GLOBAL;
596 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
597 ev_code <= cci_pmu->model->event_ranges[if_type].max)
602 #endif /* CONFIG_ARM_CCI500_PMU */
604 static ssize_t cci_pmu_format_show(struct device *dev,
605 struct device_attribute *attr, char *buf)
607 struct dev_ext_attribute *eattr = container_of(attr,
608 struct dev_ext_attribute, attr);
609 return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
612 static ssize_t cci_pmu_event_show(struct device *dev,
613 struct device_attribute *attr, char *buf)
615 struct dev_ext_attribute *eattr = container_of(attr,
616 struct dev_ext_attribute, attr);
617 /* source parameter is mandatory for normal PMU events */
618 return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
619 (unsigned long)eattr->var);
622 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
624 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
627 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
629 return readl_relaxed(cci_pmu->base +
630 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
633 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
634 int idx, unsigned int offset)
636 return writel_relaxed(value, cci_pmu->base +
637 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
640 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
642 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
645 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
647 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
650 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
652 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
656 * Returns the number of programmable counters actually implemented
659 static u32 pmu_get_max_counters(void)
661 return (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
662 CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
665 static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
667 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
668 unsigned long cci_event = event->hw.config_base;
671 if (cci_pmu->model->get_event_idx)
672 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
674 /* Generic code to find an unused idx from the mask */
675 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
676 if (!test_and_set_bit(idx, hw->used_mask))
679 /* No counters available */
683 static int pmu_map_event(struct perf_event *event)
685 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
687 if (event->attr.type < PERF_TYPE_MAX ||
688 !cci_pmu->model->validate_hw_event)
691 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
694 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
697 struct platform_device *pmu_device = cci_pmu->plat_device;
699 if (unlikely(!pmu_device))
702 if (cci_pmu->nr_irqs < 1) {
703 dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
708 * Register all available CCI PMU interrupts. In the interrupt handler
709 * we iterate over the counters checking for interrupt source (the
710 * overflowing counter) and clear it.
712 * This should allow handling of non-unique interrupts for the counters.
714 for (i = 0; i < cci_pmu->nr_irqs; i++) {
715 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
716 "arm-cci-pmu", cci_pmu);
718 dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
723 set_bit(i, &cci_pmu->active_irqs);
729 static void pmu_free_irq(struct cci_pmu *cci_pmu)
733 for (i = 0; i < cci_pmu->nr_irqs; i++) {
734 if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
737 free_irq(cci_pmu->irqs[i], cci_pmu);
741 static u32 pmu_read_counter(struct perf_event *event)
743 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
744 struct hw_perf_event *hw_counter = &event->hw;
745 int idx = hw_counter->idx;
748 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
749 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
752 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
757 static void pmu_write_counter(struct perf_event *event, u32 value)
759 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
760 struct hw_perf_event *hw_counter = &event->hw;
761 int idx = hw_counter->idx;
763 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
764 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
766 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
769 static u64 pmu_event_update(struct perf_event *event)
771 struct hw_perf_event *hwc = &event->hw;
772 u64 delta, prev_raw_count, new_raw_count;
775 prev_raw_count = local64_read(&hwc->prev_count);
776 new_raw_count = pmu_read_counter(event);
777 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
778 new_raw_count) != prev_raw_count);
780 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
782 local64_add(delta, &event->count);
784 return new_raw_count;
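/*
 * Worked example of the wrap-around handling above (illustrative): with a
 * 32-bit counter, prev_raw_count = 0xfffffff0 and new_raw_count = 0x10 give
 * delta = (0x10 - 0xfffffff0) & CCI_PMU_CNTR_MASK = 0x20, i.e. 32 events,
 * which is what gets added to event->count.
 */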
787 static void pmu_read(struct perf_event *event)
789 pmu_event_update(event);
792 void pmu_event_set_period(struct perf_event *event)
794 struct hw_perf_event *hwc = &event->hw;
796 * The CCI PMU counters have a period of 2^32. To account for the
797 * possibility of extreme interrupt latency we program for a period of
798 * half that. Hopefully we can handle the interrupt before another 2^31
799 * events occur and the counter overtakes its previous value.
801 u64 val = 1ULL << 31;
802 local64_set(&hwc->prev_count, val);
803 pmu_write_counter(event, val);
806 static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
809 struct cci_pmu *cci_pmu = dev;
810 struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
811 int idx, handled = IRQ_NONE;
813 raw_spin_lock_irqsave(&events->pmu_lock, flags);
815 * Iterate over counters and update the corresponding perf events.
816 * This should work regardless of whether we have per-counter overflow
817 * interrupt or a combined overflow interrupt.
819 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
820 struct perf_event *event = events->events[idx];
821 struct hw_perf_event *hw_counter;
826 hw_counter = &event->hw;
828 /* Did this counter overflow? */
829 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
830 CCI_PMU_OVRFLW_FLAG))
833 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
836 pmu_event_update(event);
837 pmu_event_set_period(event);
838 handled = IRQ_HANDLED;
840 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
842 return IRQ_RETVAL(handled);
845 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
847 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
849 pmu_free_irq(cci_pmu);
855 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
857 pmu_free_irq(cci_pmu);
860 static void hw_perf_event_destroy(struct perf_event *event)
862 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
863 atomic_t *active_events = &cci_pmu->active_events;
864 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
866 if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
867 cci_pmu_put_hw(cci_pmu);
868 mutex_unlock(reserve_mutex);
872 static void cci_pmu_enable(struct pmu *pmu)
874 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
875 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
876 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
883 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
885 /* Enable all the PMU counters. */
886 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
887 writel(val, cci_ctrl_base + CCI_PMCR);
888 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
892 static void cci_pmu_disable(struct pmu *pmu)
894 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
895 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
899 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
901 /* Disable all the PMU counters. */
902 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
903 writel(val, cci_ctrl_base + CCI_PMCR);
904 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
908 * Check if the idx represents a non-programmable counter.
909 * All the fixed event counters are mapped before the programmable
910 * counters.
912 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
914 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
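/*
 * Example (illustrative): on CCI-400 the single fixed counter is the cycle
 * counter at idx 0 (CCI400_PMU_CYCLE_CNTR_IDX), so programmable counters
 * start at idx 1 (CCI400_PMU_CNTR0_IDX); on CCI-500 there are no fixed
 * counters and every index is programmable.
 */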
917 static void cci_pmu_start(struct perf_event *event, int pmu_flags)
919 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
920 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
921 struct hw_perf_event *hwc = &event->hw;
926 * To handle interrupt latency, we always reprogram the period
927 * regardless of PERF_EF_RELOAD.
929 if (pmu_flags & PERF_EF_RELOAD)
930 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
934 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
935 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
939 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
941 /* Configure the counter unless you are counting a fixed event */
942 if (!pmu_fixed_hw_idx(cci_pmu, idx))
943 pmu_set_event(cci_pmu, idx, hwc->config_base);
945 pmu_event_set_period(event);
946 pmu_enable_counter(cci_pmu, idx);
948 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
951 static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
953 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
954 struct hw_perf_event *hwc = &event->hw;
957 if (hwc->state & PERF_HES_STOPPED)
960 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
961 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
966 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
969 pmu_disable_counter(cci_pmu, idx);
970 pmu_event_update(event);
971 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
974 static int cci_pmu_add(struct perf_event *event, int flags)
976 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
977 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
978 struct hw_perf_event *hwc = &event->hw;
982 perf_pmu_disable(event->pmu);
984 /* If we don't have a space for the counter then finish early. */
985 idx = pmu_get_event_idx(hw_events, event);
992 hw_events->events[idx] = event;
994 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
995 if (flags & PERF_EF_START)
996 cci_pmu_start(event, PERF_EF_RELOAD);
998 /* Propagate our changes to the userspace mapping. */
999 perf_event_update_userpage(event);
1002 perf_pmu_enable(event->pmu);
1006 static void cci_pmu_del(struct perf_event *event, int flags)
1008 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1009 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1010 struct hw_perf_event *hwc = &event->hw;
1013 cci_pmu_stop(event, PERF_EF_UPDATE);
1014 hw_events->events[idx] = NULL;
1015 clear_bit(idx, hw_events->used_mask);
1017 perf_event_update_userpage(event);
1021 validate_event(struct pmu *cci_pmu,
1022 struct cci_pmu_hw_events *hw_events,
1023 struct perf_event *event)
1025 if (is_software_event(event))
1029 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
1030 * core perf code won't check that the pmu->ctx == leader->ctx
1031 * until after pmu->event_init(event).
1033 if (event->pmu != cci_pmu)
1036 if (event->state < PERF_EVENT_STATE_OFF)
1039 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
1042 return pmu_get_event_idx(hw_events, event) >= 0;
1046 validate_group(struct perf_event *event)
1048 struct perf_event *sibling, *leader = event->group_leader;
1049 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1050 unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
1051 struct cci_pmu_hw_events fake_pmu = {
1053 * Initialise the fake PMU. We only need to populate the
1054 * used_mask for the purposes of validation.
1058 memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
1060 if (!validate_event(event->pmu, &fake_pmu, leader))
1063 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
1064 if (!validate_event(event->pmu, &fake_pmu, sibling))
1068 if (!validate_event(event->pmu, &fake_pmu, event))
1075 __hw_perf_event_init(struct perf_event *event)
1077 struct hw_perf_event *hwc = &event->hw;
1080 mapping = pmu_map_event(event);
1083 pr_debug("event %x:%llx not supported\n", event->attr.type,
1084 event->attr.config);
1089 * We don't assign an index until we actually place the event onto
1090 * hardware. Use -1 to signify that we haven't decided where to put it
1094 hwc->config_base = 0;
1096 hwc->event_base = 0;
1099 * Store the event encoding into the config_base field.
1101 hwc->config_base |= (unsigned long)mapping;
1104 * Limit the sample_period to half of the counter width. That way, the
1105 * new counter value is far less likely to overtake the previous one
1106 * unless you have some serious IRQ latency issues.
1108 hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
1109 hwc->last_period = hwc->sample_period;
1110 local64_set(&hwc->period_left, hwc->sample_period);
1112 if (event->group_leader != event) {
1113 if (validate_group(event) != 0)
1120 static int cci_pmu_event_init(struct perf_event *event)
1122 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1123 atomic_t *active_events = &cci_pmu->active_events;
1127 if (event->attr.type != event->pmu->type)
1130 /* Shared by all CPUs, no meaningful state to sample */
1131 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1134 /* We have no filtering of any kind */
1135 if (event->attr.exclude_user ||
1136 event->attr.exclude_kernel ||
1137 event->attr.exclude_hv ||
1138 event->attr.exclude_idle ||
1139 event->attr.exclude_host ||
1140 event->attr.exclude_guest)
1144 * Following the example set by other "uncore" PMUs, we accept any CPU
1145 * and rewrite its affinity dynamically rather than having perf core
1146 * handle cpu == -1 and pid == -1 for this case.
1148 * The perf core will pin online CPUs for the duration of this call and
1149 * the event being installed into its context, so the PMU's CPU can't
1150 * change under our feet.
1152 cpu = cpumask_first(&cci_pmu->cpus);
1153 if (event->cpu < 0 || cpu < 0)
1157 event->destroy = hw_perf_event_destroy;
1158 if (!atomic_inc_not_zero(active_events)) {
1159 mutex_lock(&cci_pmu->reserve_mutex);
1160 if (atomic_read(active_events) == 0)
1161 err = cci_pmu_get_hw(cci_pmu);
1163 atomic_inc(active_events);
1164 mutex_unlock(&cci_pmu->reserve_mutex);
1169 err = __hw_perf_event_init(event);
1171 hw_perf_event_destroy(event);
1176 static ssize_t pmu_cpumask_attr_show(struct device *dev,
1177 struct device_attribute *attr, char *buf)
1179 struct dev_ext_attribute *eattr = container_of(attr,
1180 struct dev_ext_attribute, attr);
1181 struct cci_pmu *cci_pmu = eattr->var;
1183 int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
1184 cpumask_pr_args(&cci_pmu->cpus));
1190 static struct dev_ext_attribute pmu_cpumask_attr = {
1191 __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL),
1192 NULL, /* Populated in cci_pmu_init */
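/*
 * Illustrative usage: userspace can read the active CPU for this uncore PMU
 * through sysfs, e.g.
 *
 *	cat /sys/bus/event_source/devices/CCI_400/cpumask
 *
 * (the directory name follows the registered model name); perf can use this
 * to pick the CPU on which the events are opened.
 */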
1195 static struct attribute *pmu_attrs[] = {
1196 &pmu_cpumask_attr.attr.attr,
1200 static struct attribute_group pmu_attr_group = {
1204 static struct attribute_group pmu_format_attr_group = {
1206 .attrs = NULL, /* Filled in cci_pmu_init_attrs */
1209 static struct attribute_group pmu_event_attr_group = {
1211 .attrs = NULL, /* Filled in cci_pmu_init_attrs */
1214 static const struct attribute_group *pmu_attr_groups[] = {
1216 &pmu_format_attr_group,
1217 &pmu_event_attr_group,
1221 static struct attribute **alloc_attrs(struct platform_device *pdev,
1222 int n, struct dev_ext_attribute *source)
1225 struct attribute **attrs;
1227 /* Alloc n + 1 (for terminating NULL) */
1228 attrs = devm_kcalloc(&pdev->dev, n + 1, sizeof(struct attribute *),
1232 for(i = 0; i < n; i++)
1233 attrs[i] = &source[i].attr.attr;
1237 static int cci_pmu_init_attrs(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1239 const struct cci_pmu_model *model = cci_pmu->model;
1240 struct attribute **attrs;
1243 * All allocations below are managed, hence they don't need to be
1244 * freed explicitly in case of an error.
1247 if (model->nevent_attrs) {
1248 attrs = alloc_attrs(pdev, model->nevent_attrs,
1249 model->event_attrs);
1252 pmu_event_attr_group.attrs = attrs;
1254 if (model->nformat_attrs) {
1255 attrs = alloc_attrs(pdev, model->nformat_attrs,
1256 model->format_attrs);
1259 pmu_format_attr_group.attrs = attrs;
1261 pmu_cpumask_attr.var = cci_pmu;
1266 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1268 char *name = cci_pmu->model->name;
1272 rc = cci_pmu_init_attrs(cci_pmu, pdev);
1276 cci_pmu->pmu = (struct pmu) {
1277 .name = cci_pmu->model->name,
1278 .task_ctx_nr = perf_invalid_context,
1279 .pmu_enable = cci_pmu_enable,
1280 .pmu_disable = cci_pmu_disable,
1281 .event_init = cci_pmu_event_init,
1284 .start = cci_pmu_start,
1285 .stop = cci_pmu_stop,
1287 .attr_groups = pmu_attr_groups,
1290 cci_pmu->plat_device = pdev;
1291 num_cntrs = pmu_get_max_counters();
1292 if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1293 dev_warn(&pdev->dev,
1294 "PMU implements more counters(%d) than supported by"
1295 " the model(%d), truncated.",
1296 num_cntrs, cci_pmu->model->num_hw_cntrs);
1297 num_cntrs = cci_pmu->model->num_hw_cntrs;
1299 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1301 return perf_pmu_register(&cci_pmu->pmu, name, -1);
1304 static int cci_pmu_cpu_notifier(struct notifier_block *self,
1305 unsigned long action, void *hcpu)
1307 struct cci_pmu *cci_pmu = container_of(self,
1308 struct cci_pmu, cpu_nb);
1309 unsigned int cpu = (long)hcpu;
1310 unsigned int target;
1312 switch (action & ~CPU_TASKS_FROZEN) {
1313 case CPU_DOWN_PREPARE:
1314 if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
1316 target = cpumask_any_but(cpu_online_mask, cpu);
1317 if (target >= nr_cpu_ids) // UP, last CPU
1320 * TODO: migrate context once core races on event->ctx have
1323 cpumask_set_cpu(target, &cci_pmu->cpus);
1331 static struct cci_pmu_model cci_pmu_models[] = {
1332 #ifdef CONFIG_ARM_CCI400_PMU
1335 .fixed_hw_cntrs = 1, /* Cycle counter */
1338 .format_attrs = cci400_pmu_format_attrs,
1339 .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs),
1340 .event_attrs = cci400_r0_pmu_event_attrs,
1341 .nevent_attrs = ARRAY_SIZE(cci400_r0_pmu_event_attrs),
1344 CCI400_R0_SLAVE_PORT_MIN_EV,
1345 CCI400_R0_SLAVE_PORT_MAX_EV,
1348 CCI400_R0_MASTER_PORT_MIN_EV,
1349 CCI400_R0_MASTER_PORT_MAX_EV,
1352 .validate_hw_event = cci400_validate_hw_event,
1353 .get_event_idx = cci400_get_event_idx,
1356 .name = "CCI_400_r1",
1357 .fixed_hw_cntrs = 1, /* Cycle counter */
1360 .format_attrs = cci400_pmu_format_attrs,
1361 .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs),
1362 .event_attrs = cci400_r1_pmu_event_attrs,
1363 .nevent_attrs = ARRAY_SIZE(cci400_r1_pmu_event_attrs),
1366 CCI400_R1_SLAVE_PORT_MIN_EV,
1367 CCI400_R1_SLAVE_PORT_MAX_EV,
1370 CCI400_R1_MASTER_PORT_MIN_EV,
1371 CCI400_R1_MASTER_PORT_MAX_EV,
1374 .validate_hw_event = cci400_validate_hw_event,
1375 .get_event_idx = cci400_get_event_idx,
1378 #ifdef CONFIG_ARM_CCI500_PMU
1381 .fixed_hw_cntrs = 0,
1383 .cntr_size = SZ_64K,
1384 .format_attrs = cci500_pmu_format_attrs,
1385 .nformat_attrs = ARRAY_SIZE(cci500_pmu_format_attrs),
1386 .event_attrs = cci500_pmu_event_attrs,
1387 .nevent_attrs = ARRAY_SIZE(cci500_pmu_event_attrs),
1390 CCI500_SLAVE_PORT_MIN_EV,
1391 CCI500_SLAVE_PORT_MAX_EV,
1394 CCI500_MASTER_PORT_MIN_EV,
1395 CCI500_MASTER_PORT_MAX_EV,
1398 CCI500_GLOBAL_PORT_MIN_EV,
1399 CCI500_GLOBAL_PORT_MAX_EV,
1402 .validate_hw_event = cci500_validate_hw_event,
1407 static const struct of_device_id arm_cci_pmu_matches[] = {
1408 #ifdef CONFIG_ARM_CCI400_PMU
1410 .compatible = "arm,cci-400-pmu",
1414 .compatible = "arm,cci-400-pmu,r0",
1415 .data = &cci_pmu_models[CCI400_R0],
1418 .compatible = "arm,cci-400-pmu,r1",
1419 .data = &cci_pmu_models[CCI400_R1],
1422 #ifdef CONFIG_ARM_CCI500_PMU
1424 .compatible = "arm,cci-500-pmu,r0",
1425 .data = &cci_pmu_models[CCI500_R0],
1431 static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
1433 const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
1440 dev_warn(&pdev->dev, "DEPRECATED compatible property, "
1441 "requires secure access to CCI registers");
1442 return probe_cci_model(pdev);
1445 static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
1449 for (i = 0; i < nr_irqs; i++)
1456 static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
1458 struct cci_pmu *cci_pmu;
1459 const struct cci_pmu_model *model;
1462 * All allocations are devm_* hence we don't have to free
1463 * them explicitly on an error, as it would end up in driver
1464 * detach.
1466 model = get_cci_model(pdev);
1468 dev_warn(&pdev->dev, "CCI PMU version not supported\n");
1469 return ERR_PTR(-ENODEV);
1472 cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
1474 return ERR_PTR(-ENOMEM);
1476 cci_pmu->model = model;
1477 cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
1478 sizeof(*cci_pmu->irqs), GFP_KERNEL);
1480 return ERR_PTR(-ENOMEM);
1481 cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
1482 CCI_PMU_MAX_HW_CNTRS(model),
1483 sizeof(*cci_pmu->hw_events.events),
1485 if (!cci_pmu->hw_events.events)
1486 return ERR_PTR(-ENOMEM);
1487 cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
1488 BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
1489 sizeof(*cci_pmu->hw_events.used_mask),
1491 if (!cci_pmu->hw_events.used_mask)
1492 return ERR_PTR(-ENOMEM);
1498 static int cci_pmu_probe(struct platform_device *pdev)
1500 struct resource *res;
1501 struct cci_pmu *cci_pmu;
1504 cci_pmu = cci_pmu_alloc(pdev);
1505 if (IS_ERR(cci_pmu))
1506 return PTR_ERR(cci_pmu);
1508 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1509 cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
1510 if (IS_ERR(cci_pmu->base))
1514 * CCI PMU has one overflow interrupt per counter; but some may be tied
1515 * together to a common interrupt.
1517 cci_pmu->nr_irqs = 0;
1518 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1519 irq = platform_get_irq(pdev, i);
1523 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1526 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1530 * Ensure that the device tree has as many interrupts as the number
1533 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
1534 dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
1535 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1539 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1540 mutex_init(&cci_pmu->reserve_mutex);
1541 atomic_set(&cci_pmu->active_events, 0);
1542 cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
1544 cci_pmu->cpu_nb = (struct notifier_block) {
1545 .notifier_call = cci_pmu_cpu_notifier,
1547 * to migrate uncore events, our notifier should be executed
1548 * before perf core's notifier.
1550 .priority = CPU_PRI_PERF + 1,
1553 ret = register_cpu_notifier(&cci_pmu->cpu_nb);
1557 ret = cci_pmu_init(cci_pmu, pdev);
1559 unregister_cpu_notifier(&cci_pmu->cpu_nb);
1563 pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
1567 static int cci_platform_probe(struct platform_device *pdev)
1572 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
1575 static struct platform_driver cci_pmu_driver = {
1577 .name = DRIVER_NAME_PMU,
1578 .of_match_table = arm_cci_pmu_matches,
1580 .probe = cci_pmu_probe,
1583 static struct platform_driver cci_platform_driver = {
1585 .name = DRIVER_NAME,
1586 .of_match_table = arm_cci_matches,
1588 .probe = cci_platform_probe,
1591 static int __init cci_platform_init(void)
1595 ret = platform_driver_register(&cci_pmu_driver);
1599 return platform_driver_register(&cci_platform_driver);
1602 #else /* !CONFIG_ARM_CCI_PMU */
1604 static int __init cci_platform_init(void)
1609 #endif /* CONFIG_ARM_CCI_PMU */
1611 #ifdef CONFIG_ARM_CCI400_PORT_CTRL
1613 #define CCI_PORT_CTRL 0x0
1614 #define CCI_CTRL_STATUS 0xc
1616 #define CCI_ENABLE_SNOOP_REQ 0x1
1617 #define CCI_ENABLE_DVM_REQ 0x2
1618 #define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
1620 enum cci_ace_port_type {
1621 ACE_INVALID_PORT = 0x0,
1626 struct cci_ace_port {
1629 enum cci_ace_port_type type;
1630 struct device_node *dn;
1633 static struct cci_ace_port *ports;
1634 static unsigned int nb_cci_ports;
1642 * Use the port MSB as valid flag, shift can be made dynamic
1643 * by computing number of bits required for port indexes.
1644 * Code disabling CCI cpu ports runs with D-cache invalidated
1645 * and SCTLR bit clear so data accesses must be kept to a minimum
1646 * to improve performance; for now shift is left static to
1647 * avoid one more data access while disabling the CCI port.
1649 #define PORT_VALID_SHIFT 31
1650 #define PORT_VALID (0x1 << PORT_VALID_SHIFT)
1652 static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
1654 port->port = PORT_VALID | index;
1655 port->mpidr = mpidr;
1658 static inline bool cpu_port_is_valid(struct cpu_port *port)
1660 return !!(port->port & PORT_VALID);
1663 static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
1665 return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
1668 static struct cpu_port cpu_port[NR_CPUS];
1671 * __cci_ace_get_port - Function to retrieve the port index connected to
1674 * @dn: device node of the device to look-up
1678 * - CCI port index if success
1679 * - -ENODEV if failure
1681 static int __cci_ace_get_port(struct device_node *dn, int type)
1685 struct device_node *cci_portn;
1687 cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
1688 for (i = 0; i < nb_cci_ports; i++) {
1689 ace_match = ports[i].type == type;
1690 if (ace_match && cci_portn == ports[i].dn)
1696 int cci_ace_get_port(struct device_node *dn)
1698 return __cci_ace_get_port(dn, ACE_LITE_PORT);
1700 EXPORT_SYMBOL_GPL(cci_ace_get_port);
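/*
 * Device tree example (illustrative, per the CCI binding): a bus master wired
 * to an ACE-Lite slave port references that port with a phandle, which is
 * what __cci_ace_get_port() matches against above:
 *
 *	dma0: dma@3000000 {
 *		...
 *		cci-control-port = <&cci_control0>;
 *	};
 */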
1702 static void cci_ace_init_ports(void)
1705 struct device_node *cpun;
1708 * Port index look-up speeds up the function disabling ports by CPU,
1709 * since the logical to port index mapping is done once and does
1710 * not change after system boot.
1711 * The stashed index array is initialized for all possible CPUs
1714 for_each_possible_cpu(cpu) {
1715 /* too early to use cpu->of_node */
1716 cpun = of_get_cpu_node(cpu, NULL);
1718 if (WARN(!cpun, "Missing cpu device node\n"))
1721 port = __cci_ace_get_port(cpun, ACE_PORT);
1725 init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
1728 for_each_possible_cpu(cpu) {
1729 WARN(!cpu_port_is_valid(&cpu_port[cpu]),
1730 "CPU %u does not have an associated CCI port\n",
1735 * Functions to enable/disable a CCI interconnect slave port
1737 * They are called by low-level power management code to disable slave
1738 * interfaces snoops and DVM broadcast.
1739 * Since they may execute with cache data allocation disabled and
1740 * after the caches have been cleaned and invalidated the functions provide
1741 * no explicit locking since they may run with D-cache disabled, so normal
1742 * cacheable kernel locks based on ldrex/strex may not work.
1743 * Locking has to be provided by BSP implementations to ensure proper
1748 * cci_port_control() - function to control a CCI port
1750 * @port: index of the port to setup
1751 * @enable: if true enables the port, if false disables it
1753 static void notrace cci_port_control(unsigned int port, bool enable)
1755 void __iomem *base = ports[port].base;
1757 writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
1759 * This function is called from power down procedures
1760 * and must not execute any instruction that might
1761 * cause the processor to be put in a quiescent state
1762 * (eg wfi). Hence, cpu_relax() can not be added to this
1763 * read loop to optimize power, since it might hide possibly
1764 * disruptive operations.
1766 while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
1771 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
1774 * @mpidr: mpidr of the CPU whose CCI port should be disabled
1776 * Disabling a CCI port for a CPU implies disabling the CCI port
1777 * controlling that CPU cluster. Code disabling CPU CCI ports
1778 * must make sure that the CPU running the code is the last active CPU
1779 * in the cluster ie all other CPUs are quiescent in a low power state.
1783 * -ENODEV on port look-up failure
1785 int notrace cci_disable_port_by_cpu(u64 mpidr)
1789 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1790 is_valid = cpu_port_is_valid(&cpu_port[cpu]);
1791 if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
1792 cci_port_control(cpu_port[cpu].port, false);
1798 EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
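/*
 * Illustrative caller (hedged): low-level cluster power-down code (e.g. an
 * MCPM back-end) typically invokes this as the last CPU of a cluster goes
 * down, after cleaning and disabling its caches:
 *
 *	cci_disable_port_by_cpu(read_cpuid_mpidr());
 */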
1801 * cci_enable_port_for_self() - enable a CCI port for calling CPU
1803 * Enabling a CCI port for the calling CPU implies enabling the CCI
1804 * port controlling that CPU's cluster. Caller must make sure that the
1805 * CPU running the code is the first active CPU in the cluster and all
1806 * other CPUs are quiescent in a low power state or waiting for this CPU
1807 * to complete the CCI initialization.
1809 * Because this is called when the MMU is still off and with no stack,
1810 * the code must be position independent and ideally rely on callee
1811 * clobbered registers only. To achieve this we must code this function
1812 * entirely in assembler.
1814 * On success this returns with the proper CCI port enabled. In case of
1815 * any failure this never returns as the inability to enable the CCI is
1816 * fatal and there is no possible recovery at this stage.
1818 asmlinkage void __naked cci_enable_port_for_self(void)
1822 " mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
1823 " and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
1826 " add r1, r1, r2 @ &cpu_port \n"
1827 " add ip, r1, %[sizeof_cpu_port] \n"
1829 /* Loop over the cpu_port array looking for a matching MPIDR */
1830 "1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
1831 " cmp r2, r0 @ compare MPIDR \n"
1834 /* Found a match, now test port validity */
1835 " ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
1836 " tst r3, #"__stringify(PORT_VALID)" \n"
1839 /* no match, loop with the next cpu_port entry */
1840 "2: add r1, r1, %[sizeof_struct_cpu_port] \n"
1841 " cmp r1, ip @ done? \n"
1844 /* CCI port not found -- cheaply try to stall this CPU */
1845 "cci_port_not_found: \n"
1848 " b cci_port_not_found \n"
1850 /* Use matched port index to look up the corresponding ports entry */
1851 "3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
1853 " ldmia r0, {r1, r2} \n"
1854 " sub r1, r1, r0 @ virt - phys \n"
1855 " ldr r0, [r0, r2] @ *(&ports) \n"
1856 " mov r2, %[sizeof_struct_ace_port] \n"
1857 " mla r0, r2, r3, r0 @ &ports[index] \n"
1858 " sub r0, r0, r1 @ virt_to_phys() \n"
1860 /* Enable the CCI port */
1861 " ldr r0, [r0, %[offsetof_port_phys]] \n"
1862 " mov r3, %[cci_enable_req]\n"
1863 " str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
1865 /* poll the status reg for completion */
1868 " ldr r0, [r0, r1] @ cci_ctrl_base \n"
1869 "4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
1870 " tst r1, %[cci_control_status_bits] \n"
1877 "5: .word cpu_port - . \n"
1879 " .word ports - 6b \n"
1880 "7: .word cci_ctrl_phys - . \n"
1882 [sizeof_cpu_port] "i" (sizeof(cpu_port)),
1883 [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
1884 [cci_control_status_bits] "i" cpu_to_le32(1),
1886 [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
1888 [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
1890 [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
1891 [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
1892 [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
1893 [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
1899 * __cci_control_port_by_device() - function to control a CCI port by device
1902 * @dn: device node pointer of the device whose CCI port should be
1904 * @enable: if true enables the port, if false disables it
1908 * -ENODEV on port look-up failure
1910 int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
1917 port = __cci_ace_get_port(dn, ACE_LITE_PORT);
1918 if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
1921 cci_port_control(port, enable);
1924 EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
1927 * __cci_control_port_by_index() - function to control a CCI port by port index
1929 * @port: port index previously retrieved with cci_ace_get_port()
1930 * @enable: if true enables the port, if false disables it
1934 * -ENODEV on port index out of range
1935 * -EPERM if operation carried out on an ACE PORT
1937 int notrace __cci_control_port_by_index(u32 port, bool enable)
1939 if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
1942 * CCI control for ports connected to CPUS is extremely fragile
1943 * and must be made to go through a specific and controlled
1944 * interface (ie cci_disable_port_by_cpu(); control by general purpose
1945 * indexing is therefore disabled for ACE ports.
1947 if (ports[port].type == ACE_PORT)
1950 cci_port_control(port, enable);
1953 EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
1955 static const struct of_device_id arm_cci_ctrl_if_matches[] = {
1956 {.compatible = "arm,cci-400-ctrl-if", },
1960 static int cci_probe_ports(struct device_node *np)
1962 struct cci_nb_ports const *cci_config;
1963 int ret, i, nb_ace = 0, nb_ace_lite = 0;
1964 struct device_node *cp;
1965 struct resource res;
1966 const char *match_str;
1970 cci_config = of_match_node(arm_cci_matches, np)->data;
1974 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
1976 ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
1980 for_each_child_of_node(np, cp) {
1981 if (!of_match_node(arm_cci_ctrl_if_matches, cp))
1984 i = nb_ace + nb_ace_lite;
1986 if (i >= nb_cci_ports)
1989 if (of_property_read_string(cp, "interface-type",
1991 WARN(1, "node %s missing interface-type property\n",
1995 is_ace = strcmp(match_str, "ace") == 0;
1996 if (!is_ace && strcmp(match_str, "ace-lite")) {
1997 WARN(1, "node %s containing invalid interface-type property, skipping it\n",
2002 ret = of_address_to_resource(cp, 0, &res);
2004 ports[i].base = ioremap(res.start, resource_size(&res));
2005 ports[i].phys = res.start;
2007 if (ret || !ports[i].base) {
2008 WARN(1, "unable to ioremap CCI port %d\n", i);
2013 if (WARN_ON(nb_ace >= cci_config->nb_ace))
2015 ports[i].type = ACE_PORT;
2018 if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
2020 ports[i].type = ACE_LITE_PORT;
2026 /* initialize a stashed array of ACE ports to speed-up look-up */
2027 cci_ace_init_ports();
2030 * Multi-cluster systems may need this data when non-coherent, during
2031 * cluster power-up/power-down. Make sure it reaches main memory.
2033 sync_cache_w(&cci_ctrl_base);
2034 sync_cache_w(&cci_ctrl_phys);
2035 sync_cache_w(&ports);
2036 sync_cache_w(&cpu_port);
2037 __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
2038 pr_info("ARM CCI driver probed\n");
2042 #else /* !CONFIG_ARM_CCI400_PORT_CTRL */
2043 static inline int cci_probe_ports(struct device_node *np)
2047 #endif /* CONFIG_ARM_CCI400_PORT_CTRL */
2049 static int cci_probe(void)
2052 struct device_node *np;
2053 struct resource res;
2055 np = of_find_matching_node(NULL, arm_cci_matches);
2056 if (!np || !of_device_is_available(np))
2059 ret = of_address_to_resource(np, 0, &res);
2061 cci_ctrl_base = ioremap(res.start, resource_size(&res));
2062 cci_ctrl_phys = res.start;
2064 if (ret || !cci_ctrl_base) {
2065 WARN(1, "unable to ioremap CCI ctrl\n");
2069 return cci_probe_ports(np);
2072 static int cci_init_status = -EAGAIN;
2073 static DEFINE_MUTEX(cci_probing);
2075 static int cci_init(void)
2077 if (cci_init_status != -EAGAIN)
2078 return cci_init_status;
2080 mutex_lock(&cci_probing);
2081 if (cci_init_status == -EAGAIN)
2082 cci_init_status = cci_probe();
2083 mutex_unlock(&cci_probing);
2084 return cci_init_status;
2088 * To sort out early init call ordering, a helper function is provided to
2089 * check whether the CCI driver has been initialized. If it has not, the
2090 * helper calls the init function that probes the driver and updates the
2091 * return value.
2093 bool cci_probed(void)
2095 return cci_init() == 0;
2097 EXPORT_SYMBOL_GPL(cci_probed);
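/*
 * Illustrative usage (hedged): platform SMP/power management code gates its
 * use of the CCI port control API on this helper, e.g.:
 *
 *	if (!cci_probed())
 *		return -ENODEV;
 */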
2099 early_initcall(cci_init);
2100 core_initcall(cci_platform_init);
2101 MODULE_LICENSE("GPL");
2102 MODULE_DESCRIPTION("ARM CCI support");