/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13 #include <linux/kernel.h>
14 #include <linux/moduleparam.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
20 #include <linux/err.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
24 #include <linux/smp.h>
25 #include <linux/sysfs.h>
26 #include <linux/stat.h>
27 #include <linux/clk.h>
28 #include <linux/cpu.h>
29 #include <linux/coresight.h>
30 #include <linux/pm_wakeup.h>
31 #include <linux/amba/bus.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/pm_runtime.h>
35 #include <asm/sections.h>
37 #include "coresight-etm4x.h"
39 static int boot_enable;
40 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
42 /* The number of ETMv4 currently registered */
43 static int etm4_count;
44 static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
46 static void etm4_os_unlock(void *info)
48 struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
50 /* Writing any value to ETMOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata->base + TRCOSLAR);
55 static bool etm4_arch_supported(u8 arch)
66 static int etm4_cpu_id(struct coresight_device *csdev)
68 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
73 static int etm4_trace_id(struct coresight_device *csdev)
75 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
80 return drvdata->trcid;
82 spin_lock_irqsave(&drvdata->spinlock, flags);
84 CS_UNLOCK(drvdata->base);
85 trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
86 trace_id &= ETM_TRACEID_MASK;
87 CS_LOCK(drvdata->base);
89 spin_unlock_irqrestore(&drvdata->spinlock, flags);
94 static void etm4_enable_hw(void *info)
97 struct etmv4_drvdata *drvdata = info;
99 CS_UNLOCK(drvdata->base);
101 etm4_os_unlock(drvdata);
103 /* Disable the trace unit before programming trace registers */
104 writel_relaxed(0, drvdata->base + TRCPRGCTLR);
106 /* wait for TRCSTATR.IDLE to go up */
107 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
108 dev_err(drvdata->dev,
109 "timeout observed when probing at offset %#x\n",
112 writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
113 writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
114 /* nothing specific implemented */
115 writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
116 writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
117 writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
118 writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
119 writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
120 writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
121 writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
122 writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
123 writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
124 writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
125 writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
126 writel_relaxed(drvdata->vissctlr,
127 drvdata->base + TRCVISSCTLR);
128 writel_relaxed(drvdata->vipcssctlr,
129 drvdata->base + TRCVIPCSSCTLR);
130 for (i = 0; i < drvdata->nrseqstate - 1; i++)
131 writel_relaxed(drvdata->seq_ctrl[i],
132 drvdata->base + TRCSEQEVRn(i));
133 writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
134 writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
135 writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
136 for (i = 0; i < drvdata->nr_cntr; i++) {
137 writel_relaxed(drvdata->cntrldvr[i],
138 drvdata->base + TRCCNTRLDVRn(i));
139 writel_relaxed(drvdata->cntr_ctrl[i],
140 drvdata->base + TRCCNTCTLRn(i));
141 writel_relaxed(drvdata->cntr_val[i],
142 drvdata->base + TRCCNTVRn(i));
145 /* Resource selector pair 0 is always implemented and reserved */
146 for (i = 2; i < drvdata->nr_resource * 2; i++)
147 writel_relaxed(drvdata->res_ctrl[i],
148 drvdata->base + TRCRSCTLRn(i));
150 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
151 writel_relaxed(drvdata->ss_ctrl[i],
152 drvdata->base + TRCSSCCRn(i));
153 writel_relaxed(drvdata->ss_status[i],
154 drvdata->base + TRCSSCSRn(i));
155 writel_relaxed(drvdata->ss_pe_cmp[i],
156 drvdata->base + TRCSSPCICRn(i));
158 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
159 writeq_relaxed(drvdata->addr_val[i],
160 drvdata->base + TRCACVRn(i));
161 writeq_relaxed(drvdata->addr_acc[i],
162 drvdata->base + TRCACATRn(i));
164 for (i = 0; i < drvdata->numcidc; i++)
165 writeq_relaxed(drvdata->ctxid_pid[i],
166 drvdata->base + TRCCIDCVRn(i));
167 writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
168 writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
170 for (i = 0; i < drvdata->numvmidc; i++)
171 writeq_relaxed(drvdata->vmid_val[i],
172 drvdata->base + TRCVMIDCVRn(i));
173 writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
174 writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
176 /* Enable the trace unit */
177 writel_relaxed(1, drvdata->base + TRCPRGCTLR);
179 /* wait for TRCSTATR.IDLE to go back down to '0' */
180 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
181 dev_err(drvdata->dev,
182 "timeout observed when probing at offset %#x\n",
185 CS_LOCK(drvdata->base);
187 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
190 static int etm4_enable(struct coresight_device *csdev)
192 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
195 spin_lock(&drvdata->spinlock);
198 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
199 * ensures that register writes occur when cpu is powered.
201 ret = smp_call_function_single(drvdata->cpu,
202 etm4_enable_hw, drvdata, 1);
205 drvdata->enable = true;
206 drvdata->sticky_enable = true;
208 spin_unlock(&drvdata->spinlock);
210 dev_info(drvdata->dev, "ETM tracing enabled\n");
213 spin_unlock(&drvdata->spinlock);
217 static void etm4_disable_hw(void *info)
220 struct etmv4_drvdata *drvdata = info;
222 CS_UNLOCK(drvdata->base);
224 control = readl_relaxed(drvdata->base + TRCPRGCTLR);
226 /* EN, bit[0] Trace unit enable bit */
229 /* make sure everything completes before disabling */
232 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
234 CS_LOCK(drvdata->base);
236 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
239 static void etm4_disable(struct coresight_device *csdev)
241 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
244 * Taking hotplug lock here protects from clocks getting disabled
245 * with tracing being left on (crash scenario) if user disable occurs
246 * after cpu online mask indicates the cpu is offline but before the
247 * DYING hotplug callback is serviced by the ETM driver.
250 spin_lock(&drvdata->spinlock);
253 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
254 * ensures that register writes occur when cpu is powered.
256 smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
257 drvdata->enable = false;
259 spin_unlock(&drvdata->spinlock);
262 dev_info(drvdata->dev, "ETM tracing disabled\n");
265 static const struct coresight_ops_source etm4_source_ops = {
266 .cpu_id = etm4_cpu_id,
267 .trace_id = etm4_trace_id,
268 .enable = etm4_enable,
269 .disable = etm4_disable,
272 static const struct coresight_ops etm4_cs_ops = {
273 .source_ops = &etm4_source_ops,
276 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
278 u8 idx = drvdata->addr_idx;
281 * TRCACATRn.TYPE bit[1:0]: type of comparison
282 * the trace unit performs
284 if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
289 * We are performing instruction address comparison. Set the
290 * relevant bit of ViewInst Include/Exclude Control register
291 * for corresponding address comparator pair.
293 if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
294 drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
297 if (exclude == true) {
299 * Set exclude bit and unset the include bit
300 * corresponding to comparator pair
302 drvdata->viiectlr |= BIT(idx / 2 + 16);
303 drvdata->viiectlr &= ~BIT(idx / 2);
306 * Set include bit and unset exclude bit
307 * corresponding to comparator pair
309 drvdata->viiectlr |= BIT(idx / 2);
310 drvdata->viiectlr &= ~BIT(idx / 2 + 16);
316 static ssize_t nr_pe_cmp_show(struct device *dev,
317 struct device_attribute *attr,
321 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
323 val = drvdata->nr_pe_cmp;
324 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
326 static DEVICE_ATTR_RO(nr_pe_cmp);
328 static ssize_t nr_addr_cmp_show(struct device *dev,
329 struct device_attribute *attr,
333 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
335 val = drvdata->nr_addr_cmp;
336 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
338 static DEVICE_ATTR_RO(nr_addr_cmp);
340 static ssize_t nr_cntr_show(struct device *dev,
341 struct device_attribute *attr,
345 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
347 val = drvdata->nr_cntr;
348 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
350 static DEVICE_ATTR_RO(nr_cntr);
352 static ssize_t nr_ext_inp_show(struct device *dev,
353 struct device_attribute *attr,
357 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
359 val = drvdata->nr_ext_inp;
360 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
362 static DEVICE_ATTR_RO(nr_ext_inp);
364 static ssize_t numcidc_show(struct device *dev,
365 struct device_attribute *attr,
369 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
371 val = drvdata->numcidc;
372 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
374 static DEVICE_ATTR_RO(numcidc);
376 static ssize_t numvmidc_show(struct device *dev,
377 struct device_attribute *attr,
381 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
383 val = drvdata->numvmidc;
384 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
386 static DEVICE_ATTR_RO(numvmidc);
388 static ssize_t nrseqstate_show(struct device *dev,
389 struct device_attribute *attr,
393 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
395 val = drvdata->nrseqstate;
396 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
398 static DEVICE_ATTR_RO(nrseqstate);
400 static ssize_t nr_resource_show(struct device *dev,
401 struct device_attribute *attr,
405 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
407 val = drvdata->nr_resource;
408 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
410 static DEVICE_ATTR_RO(nr_resource);
412 static ssize_t nr_ss_cmp_show(struct device *dev,
413 struct device_attribute *attr,
417 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
419 val = drvdata->nr_ss_cmp;
420 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
422 static DEVICE_ATTR_RO(nr_ss_cmp);
424 static ssize_t reset_store(struct device *dev,
425 struct device_attribute *attr,
426 const char *buf, size_t size)
430 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
432 if (kstrtoul(buf, 16, &val))
435 spin_lock(&drvdata->spinlock);
439 /* Disable data tracing: do not trace load and store data transfers */
440 drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
441 drvdata->cfg &= ~(BIT(1) | BIT(2));
443 /* Disable data value and data address tracing */
444 drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
445 ETM_MODE_DATA_TRACE_VAL);
446 drvdata->cfg &= ~(BIT(16) | BIT(17));
448 /* Disable all events tracing */
449 drvdata->eventctrl0 = 0x0;
450 drvdata->eventctrl1 = 0x0;
452 /* Disable timestamp event */
453 drvdata->ts_ctrl = 0x0;
455 /* Disable stalling */
456 drvdata->stall_ctrl = 0x0;
458 /* Reset trace synchronization period to 2^8 = 256 bytes*/
459 if (drvdata->syncpr == false)
460 drvdata->syncfreq = 0x8;
463 * Enable ViewInst to trace everything with start-stop logic in
464 * started state. ARM recommends start-stop logic is set before
467 drvdata->vinst_ctrl |= BIT(0);
468 if (drvdata->nr_addr_cmp == true) {
469 drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
470 /* SSSTATUS, bit[9] */
471 drvdata->vinst_ctrl |= BIT(9);
474 /* No address range filtering for ViewInst */
475 drvdata->viiectlr = 0x0;
477 /* No start-stop filtering for ViewInst */
478 drvdata->vissctlr = 0x0;
480 /* Disable seq events */
481 for (i = 0; i < drvdata->nrseqstate-1; i++)
482 drvdata->seq_ctrl[i] = 0x0;
483 drvdata->seq_rst = 0x0;
484 drvdata->seq_state = 0x0;
486 /* Disable external input events */
487 drvdata->ext_inp = 0x0;
489 drvdata->cntr_idx = 0x0;
490 for (i = 0; i < drvdata->nr_cntr; i++) {
491 drvdata->cntrldvr[i] = 0x0;
492 drvdata->cntr_ctrl[i] = 0x0;
493 drvdata->cntr_val[i] = 0x0;
496 /* Resource selector pair 0 is always implemented and reserved */
497 drvdata->res_idx = 0x2;
498 for (i = 2; i < drvdata->nr_resource * 2; i++)
499 drvdata->res_ctrl[i] = 0x0;
501 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
502 drvdata->ss_ctrl[i] = 0x0;
503 drvdata->ss_pe_cmp[i] = 0x0;
506 drvdata->addr_idx = 0x0;
507 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
508 drvdata->addr_val[i] = 0x0;
509 drvdata->addr_acc[i] = 0x0;
510 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
513 drvdata->ctxid_idx = 0x0;
514 for (i = 0; i < drvdata->numcidc; i++) {
515 drvdata->ctxid_pid[i] = 0x0;
516 drvdata->ctxid_vpid[i] = 0x0;
519 drvdata->ctxid_mask0 = 0x0;
520 drvdata->ctxid_mask1 = 0x0;
522 drvdata->vmid_idx = 0x0;
523 for (i = 0; i < drvdata->numvmidc; i++)
524 drvdata->vmid_val[i] = 0x0;
525 drvdata->vmid_mask0 = 0x0;
526 drvdata->vmid_mask1 = 0x0;
528 drvdata->trcid = drvdata->cpu + 1;
529 spin_unlock(&drvdata->spinlock);
532 static DEVICE_ATTR_WO(reset);
534 static ssize_t mode_show(struct device *dev,
535 struct device_attribute *attr,
539 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
542 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
545 static ssize_t mode_store(struct device *dev,
546 struct device_attribute *attr,
547 const char *buf, size_t size)
549 unsigned long val, mode;
550 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
552 if (kstrtoul(buf, 16, &val))
555 spin_lock(&drvdata->spinlock);
556 drvdata->mode = val & ETMv4_MODE_ALL;
558 if (drvdata->mode & ETM_MODE_EXCLUDE)
559 etm4_set_mode_exclude(drvdata, true);
561 etm4_set_mode_exclude(drvdata, false);
563 if (drvdata->instrp0 == true) {
564 /* start by clearing instruction P0 field */
565 drvdata->cfg &= ~(BIT(1) | BIT(2));
566 if (drvdata->mode & ETM_MODE_LOAD)
567 /* 0b01 Trace load instructions as P0 instructions */
568 drvdata->cfg |= BIT(1);
569 if (drvdata->mode & ETM_MODE_STORE)
570 /* 0b10 Trace store instructions as P0 instructions */
571 drvdata->cfg |= BIT(2);
572 if (drvdata->mode & ETM_MODE_LOAD_STORE)
574 * 0b11 Trace load and store instructions
577 drvdata->cfg |= BIT(1) | BIT(2);
580 /* bit[3], Branch broadcast mode */
581 if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
582 drvdata->cfg |= BIT(3);
584 drvdata->cfg &= ~BIT(3);
586 /* bit[4], Cycle counting instruction trace bit */
587 if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
588 (drvdata->trccci == true))
589 drvdata->cfg |= BIT(4);
591 drvdata->cfg &= ~BIT(4);
593 /* bit[6], Context ID tracing bit */
594 if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
595 drvdata->cfg |= BIT(6);
597 drvdata->cfg &= ~BIT(6);
599 if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
600 drvdata->cfg |= BIT(7);
602 drvdata->cfg &= ~BIT(7);
604 /* bits[10:8], Conditional instruction tracing bit */
605 mode = ETM_MODE_COND(drvdata->mode);
606 if (drvdata->trccond == true) {
607 drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
608 drvdata->cfg |= mode << 8;
611 /* bit[11], Global timestamp tracing bit */
612 if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
613 drvdata->cfg |= BIT(11);
615 drvdata->cfg &= ~BIT(11);
617 /* bit[12], Return stack enable bit */
618 if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
619 (drvdata->retstack == true))
620 drvdata->cfg |= BIT(12);
622 drvdata->cfg &= ~BIT(12);
624 /* bits[14:13], Q element enable field */
625 mode = ETM_MODE_QELEM(drvdata->mode);
626 /* start by clearing QE bits */
627 drvdata->cfg &= ~(BIT(13) | BIT(14));
628 /* if supported, Q elements with instruction counts are enabled */
629 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
630 drvdata->cfg |= BIT(13);
632 * if supported, Q elements with and without instruction
635 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
636 drvdata->cfg |= BIT(14);
638 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
639 if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
640 (drvdata->atbtrig == true))
641 drvdata->eventctrl1 |= BIT(11);
643 drvdata->eventctrl1 &= ~BIT(11);
645 /* bit[12], Low-power state behavior override bit */
646 if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
647 (drvdata->lpoverride == true))
648 drvdata->eventctrl1 |= BIT(12);
650 drvdata->eventctrl1 &= ~BIT(12);
652 /* bit[8], Instruction stall bit */
653 if (drvdata->mode & ETM_MODE_ISTALL_EN)
654 drvdata->stall_ctrl |= BIT(8);
656 drvdata->stall_ctrl &= ~BIT(8);
658 /* bit[10], Prioritize instruction trace bit */
659 if (drvdata->mode & ETM_MODE_INSTPRIO)
660 drvdata->stall_ctrl |= BIT(10);
662 drvdata->stall_ctrl &= ~BIT(10);
664 /* bit[13], Trace overflow prevention bit */
665 if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
666 (drvdata->nooverflow == true))
667 drvdata->stall_ctrl |= BIT(13);
669 drvdata->stall_ctrl &= ~BIT(13);
671 /* bit[9] Start/stop logic control bit */
672 if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
673 drvdata->vinst_ctrl |= BIT(9);
675 drvdata->vinst_ctrl &= ~BIT(9);
677 /* bit[10], Whether a trace unit must trace a Reset exception */
678 if (drvdata->mode & ETM_MODE_TRACE_RESET)
679 drvdata->vinst_ctrl |= BIT(10);
681 drvdata->vinst_ctrl &= ~BIT(10);
683 /* bit[11], Whether a trace unit must trace a system error exception */
684 if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
685 (drvdata->trc_error == true))
686 drvdata->vinst_ctrl |= BIT(11);
688 drvdata->vinst_ctrl &= ~BIT(11);
690 spin_unlock(&drvdata->spinlock);
693 static DEVICE_ATTR_RW(mode);
695 static ssize_t pe_show(struct device *dev,
696 struct device_attribute *attr,
700 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
702 val = drvdata->pe_sel;
703 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
706 static ssize_t pe_store(struct device *dev,
707 struct device_attribute *attr,
708 const char *buf, size_t size)
711 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
713 if (kstrtoul(buf, 16, &val))
716 spin_lock(&drvdata->spinlock);
717 if (val > drvdata->nr_pe) {
718 spin_unlock(&drvdata->spinlock);
722 drvdata->pe_sel = val;
723 spin_unlock(&drvdata->spinlock);
726 static DEVICE_ATTR_RW(pe);
728 static ssize_t event_show(struct device *dev,
729 struct device_attribute *attr,
733 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
735 val = drvdata->eventctrl0;
736 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
739 static ssize_t event_store(struct device *dev,
740 struct device_attribute *attr,
741 const char *buf, size_t size)
744 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
746 if (kstrtoul(buf, 16, &val))
749 spin_lock(&drvdata->spinlock);
750 switch (drvdata->nr_event) {
752 /* EVENT0, bits[7:0] */
753 drvdata->eventctrl0 = val & 0xFF;
756 /* EVENT1, bits[15:8] */
757 drvdata->eventctrl0 = val & 0xFFFF;
760 /* EVENT2, bits[23:16] */
761 drvdata->eventctrl0 = val & 0xFFFFFF;
764 /* EVENT3, bits[31:24] */
765 drvdata->eventctrl0 = val;
770 spin_unlock(&drvdata->spinlock);
773 static DEVICE_ATTR_RW(event);
775 static ssize_t event_instren_show(struct device *dev,
776 struct device_attribute *attr,
780 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
782 val = BMVAL(drvdata->eventctrl1, 0, 3);
783 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
786 static ssize_t event_instren_store(struct device *dev,
787 struct device_attribute *attr,
788 const char *buf, size_t size)
791 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
793 if (kstrtoul(buf, 16, &val))
796 spin_lock(&drvdata->spinlock);
797 /* start by clearing all instruction event enable bits */
798 drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
799 switch (drvdata->nr_event) {
801 /* generate Event element for event 1 */
802 drvdata->eventctrl1 |= val & BIT(1);
805 /* generate Event element for event 1 and 2 */
806 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
809 /* generate Event element for event 1, 2 and 3 */
810 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
813 /* generate Event element for all 4 events */
814 drvdata->eventctrl1 |= val & 0xF;
819 spin_unlock(&drvdata->spinlock);
822 static DEVICE_ATTR_RW(event_instren);
824 static ssize_t event_ts_show(struct device *dev,
825 struct device_attribute *attr,
829 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
831 val = drvdata->ts_ctrl;
832 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
835 static ssize_t event_ts_store(struct device *dev,
836 struct device_attribute *attr,
837 const char *buf, size_t size)
840 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
842 if (kstrtoul(buf, 16, &val))
844 if (!drvdata->ts_size)
847 drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
850 static DEVICE_ATTR_RW(event_ts);
852 static ssize_t syncfreq_show(struct device *dev,
853 struct device_attribute *attr,
857 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
859 val = drvdata->syncfreq;
860 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
863 static ssize_t syncfreq_store(struct device *dev,
864 struct device_attribute *attr,
865 const char *buf, size_t size)
868 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
870 if (kstrtoul(buf, 16, &val))
872 if (drvdata->syncpr == true)
875 drvdata->syncfreq = val & ETMv4_SYNC_MASK;
878 static DEVICE_ATTR_RW(syncfreq);
880 static ssize_t cyc_threshold_show(struct device *dev,
881 struct device_attribute *attr,
885 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
887 val = drvdata->ccctlr;
888 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
891 static ssize_t cyc_threshold_store(struct device *dev,
892 struct device_attribute *attr,
893 const char *buf, size_t size)
896 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
898 if (kstrtoul(buf, 16, &val))
900 if (val < drvdata->ccitmin)
903 drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
906 static DEVICE_ATTR_RW(cyc_threshold);
908 static ssize_t bb_ctrl_show(struct device *dev,
909 struct device_attribute *attr,
913 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
915 val = drvdata->bb_ctrl;
916 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
919 static ssize_t bb_ctrl_store(struct device *dev,
920 struct device_attribute *attr,
921 const char *buf, size_t size)
924 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
926 if (kstrtoul(buf, 16, &val))
928 if (drvdata->trcbb == false)
930 if (!drvdata->nr_addr_cmp)
933 * Bit[7:0] selects which address range comparator is used for
934 * branch broadcast control.
936 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
939 drvdata->bb_ctrl = val;
942 static DEVICE_ATTR_RW(bb_ctrl);
944 static ssize_t event_vinst_show(struct device *dev,
945 struct device_attribute *attr,
949 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
951 val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
952 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
955 static ssize_t event_vinst_store(struct device *dev,
956 struct device_attribute *attr,
957 const char *buf, size_t size)
960 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
962 if (kstrtoul(buf, 16, &val))
965 spin_lock(&drvdata->spinlock);
966 val &= ETMv4_EVENT_MASK;
967 drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
968 drvdata->vinst_ctrl |= val;
969 spin_unlock(&drvdata->spinlock);
972 static DEVICE_ATTR_RW(event_vinst);
974 static ssize_t s_exlevel_vinst_show(struct device *dev,
975 struct device_attribute *attr,
979 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
981 val = BMVAL(drvdata->vinst_ctrl, 16, 19);
982 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
985 static ssize_t s_exlevel_vinst_store(struct device *dev,
986 struct device_attribute *attr,
987 const char *buf, size_t size)
990 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
992 if (kstrtoul(buf, 16, &val))
995 spin_lock(&drvdata->spinlock);
996 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
997 drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
998 /* enable instruction tracing for corresponding exception level */
999 val &= drvdata->s_ex_level;
1000 drvdata->vinst_ctrl |= (val << 16);
1001 spin_unlock(&drvdata->spinlock);
1004 static DEVICE_ATTR_RW(s_exlevel_vinst);
1006 static ssize_t ns_exlevel_vinst_show(struct device *dev,
1007 struct device_attribute *attr,
1011 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1013 /* EXLEVEL_NS, bits[23:20] */
1014 val = BMVAL(drvdata->vinst_ctrl, 20, 23);
1015 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1018 static ssize_t ns_exlevel_vinst_store(struct device *dev,
1019 struct device_attribute *attr,
1020 const char *buf, size_t size)
1023 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1025 if (kstrtoul(buf, 16, &val))
1028 spin_lock(&drvdata->spinlock);
1029 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
1030 drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
1031 /* enable instruction tracing for corresponding exception level */
1032 val &= drvdata->ns_ex_level;
1033 drvdata->vinst_ctrl |= (val << 20);
1034 spin_unlock(&drvdata->spinlock);
1037 static DEVICE_ATTR_RW(ns_exlevel_vinst);
1039 static ssize_t addr_idx_show(struct device *dev,
1040 struct device_attribute *attr,
1044 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1046 val = drvdata->addr_idx;
1047 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1050 static ssize_t addr_idx_store(struct device *dev,
1051 struct device_attribute *attr,
1052 const char *buf, size_t size)
1055 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1057 if (kstrtoul(buf, 16, &val))
1059 if (val >= drvdata->nr_addr_cmp * 2)
1063 * Use spinlock to ensure index doesn't change while it gets
1064 * dereferenced multiple times within a spinlock block elsewhere.
1066 spin_lock(&drvdata->spinlock);
1067 drvdata->addr_idx = val;
1068 spin_unlock(&drvdata->spinlock);
1071 static DEVICE_ATTR_RW(addr_idx);
1073 static ssize_t addr_instdatatype_show(struct device *dev,
1074 struct device_attribute *attr,
1079 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1081 spin_lock(&drvdata->spinlock);
1082 idx = drvdata->addr_idx;
1083 val = BMVAL(drvdata->addr_acc[idx], 0, 1);
1084 len = scnprintf(buf, PAGE_SIZE, "%s\n",
1085 val == ETM_INSTR_ADDR ? "instr" :
1086 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
1087 (val == ETM_DATA_STORE_ADDR ? "data_store" :
1088 "data_load_store")));
1089 spin_unlock(&drvdata->spinlock);
1093 static ssize_t addr_instdatatype_store(struct device *dev,
1094 struct device_attribute *attr,
1095 const char *buf, size_t size)
1099 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1101 if (strlen(buf) >= 20)
1103 if (sscanf(buf, "%s", str) != 1)
1106 spin_lock(&drvdata->spinlock);
1107 idx = drvdata->addr_idx;
1108 if (!strcmp(str, "instr"))
1109 /* TYPE, bits[1:0] */
1110 drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
1112 spin_unlock(&drvdata->spinlock);
1115 static DEVICE_ATTR_RW(addr_instdatatype);
1117 static ssize_t addr_single_show(struct device *dev,
1118 struct device_attribute *attr,
1123 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1125 idx = drvdata->addr_idx;
1126 spin_lock(&drvdata->spinlock);
1127 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1128 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1129 spin_unlock(&drvdata->spinlock);
1132 val = (unsigned long)drvdata->addr_val[idx];
1133 spin_unlock(&drvdata->spinlock);
1134 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1137 static ssize_t addr_single_store(struct device *dev,
1138 struct device_attribute *attr,
1139 const char *buf, size_t size)
1143 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1145 if (kstrtoul(buf, 16, &val))
1148 spin_lock(&drvdata->spinlock);
1149 idx = drvdata->addr_idx;
1150 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1151 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1152 spin_unlock(&drvdata->spinlock);
1156 drvdata->addr_val[idx] = (u64)val;
1157 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
1158 spin_unlock(&drvdata->spinlock);
1161 static DEVICE_ATTR_RW(addr_single);
1163 static ssize_t addr_range_show(struct device *dev,
1164 struct device_attribute *attr,
1168 unsigned long val1, val2;
1169 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1171 spin_lock(&drvdata->spinlock);
1172 idx = drvdata->addr_idx;
1174 spin_unlock(&drvdata->spinlock);
1177 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1178 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1179 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1180 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1181 spin_unlock(&drvdata->spinlock);
1185 val1 = (unsigned long)drvdata->addr_val[idx];
1186 val2 = (unsigned long)drvdata->addr_val[idx + 1];
1187 spin_unlock(&drvdata->spinlock);
1188 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1191 static ssize_t addr_range_store(struct device *dev,
1192 struct device_attribute *attr,
1193 const char *buf, size_t size)
1196 unsigned long val1, val2;
1197 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1201 /* lower address comparator cannot have a higher address value */
1205 spin_lock(&drvdata->spinlock);
1206 idx = drvdata->addr_idx;
1208 spin_unlock(&drvdata->spinlock);
1212 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1213 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1214 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1215 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1216 spin_unlock(&drvdata->spinlock);
1220 drvdata->addr_val[idx] = (u64)val1;
1221 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1222 drvdata->addr_val[idx + 1] = (u64)val2;
1223 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1225 * Program include or exclude control bits for vinst or vdata
1226 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1228 if (drvdata->mode & ETM_MODE_EXCLUDE)
1229 etm4_set_mode_exclude(drvdata, true);
1231 etm4_set_mode_exclude(drvdata, false);
1233 spin_unlock(&drvdata->spinlock);
1236 static DEVICE_ATTR_RW(addr_range);
/*
 * addr_start_show/addr_start_store - read/program the selected address
 * comparator as a trace "start" trigger.  The store also enables the
 * comparator in VISSCTLR and turns on start/stop logic (SSSTATUS).
 * NOTE(review): error-return lines are elided in this view.
 */
1238 static ssize_t addr_start_show(struct device *dev,
1239 struct device_attribute *attr,
1244 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1246 spin_lock(&drvdata->spinlock);
1247 idx = drvdata->addr_idx;
/* only valid if the comparator is unused or already a START comparator */
1249 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1250 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1251 spin_unlock(&drvdata->spinlock);
1255 val = (unsigned long)drvdata->addr_val[idx];
1256 spin_unlock(&drvdata->spinlock);
1257 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1260 static ssize_t addr_start_store(struct device *dev,
1261 struct device_attribute *attr,
1262 const char *buf, size_t size)
1266 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1268 if (kstrtoul(buf, 16, &val))
1271 spin_lock(&drvdata->spinlock);
1272 idx = drvdata->addr_idx;
1273 if (!drvdata->nr_addr_cmp) {
1274 spin_unlock(&drvdata->spinlock);
1277 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1278 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1279 spin_unlock(&drvdata->spinlock);
1283 drvdata->addr_val[idx] = (u64)val;
1284 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
/* low 16 bits of VISSCTLR select start comparators */
1285 drvdata->vissctlr |= BIT(idx);
1286 /* SSSTATUS, bit[9] - turn on start/stop logic */
1287 drvdata->vinst_ctrl |= BIT(9);
1288 spin_unlock(&drvdata->spinlock);
1291 static DEVICE_ATTR_RW(addr_start);
/*
 * addr_stop_show/addr_stop_store - read/program the selected address
 * comparator as a trace "stop" trigger (upper half of VISSCTLR), and
 * enable start/stop logic.  Mirrors addr_start above.
 * NOTE(review): error-return lines are elided in this view.
 */
1293 static ssize_t addr_stop_show(struct device *dev,
1294 struct device_attribute *attr,
1299 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1301 spin_lock(&drvdata->spinlock);
1302 idx = drvdata->addr_idx;
1304 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1305 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1306 spin_unlock(&drvdata->spinlock);
1310 val = (unsigned long)drvdata->addr_val[idx];
1311 spin_unlock(&drvdata->spinlock);
1312 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1315 static ssize_t addr_stop_store(struct device *dev,
1316 struct device_attribute *attr,
1317 const char *buf, size_t size)
1321 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1323 if (kstrtoul(buf, 16, &val))
1326 spin_lock(&drvdata->spinlock);
1327 idx = drvdata->addr_idx;
1328 if (!drvdata->nr_addr_cmp) {
1329 spin_unlock(&drvdata->spinlock);
1332 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1333 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1334 spin_unlock(&drvdata->spinlock);
1338 drvdata->addr_val[idx] = (u64)val;
1339 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
/* stop comparators live in bits[31:16] of VISSCTLR */
1340 drvdata->vissctlr |= BIT(idx + 16);
1341 /* SSSTATUS, bit[9] - turn on start/stop logic */
1342 drvdata->vinst_ctrl |= BIT(9);
1343 spin_unlock(&drvdata->spinlock);
1346 static DEVICE_ATTR_RW(addr_stop);
/*
 * addr_ctxtype_show/addr_ctxtype_store - read/set the CONTEXTTYPE field
 * (TRCACATRn bits[3:2]) of the selected address comparator: none/ctxid/
 * vmid/all.  The store validates the string against the number of
 * implemented ctxid/vmid comparators before setting each bit.
 * NOTE(review): some braces and returns are elided in this view.
 */
1348 static ssize_t addr_ctxtype_show(struct device *dev,
1349 struct device_attribute *attr,
1354 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1356 spin_lock(&drvdata->spinlock);
1357 idx = drvdata->addr_idx;
1358 /* CONTEXTTYPE, bits[3:2] */
1359 val = BMVAL(drvdata->addr_acc[idx], 2, 3);
1360 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1361 (val == ETM_CTX_CTXID ? "ctxid" :
1362 (val == ETM_CTX_VMID ? "vmid" : "all")));
1363 spin_unlock(&drvdata->spinlock);
1367 static ssize_t addr_ctxtype_store(struct device *dev,
1368 struct device_attribute *attr,
1369 const char *buf, size_t size)
1373 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
/* bound the token so sscanf %s below cannot overrun the local buffer */
1375 if (strlen(buf) >= 10)
1377 if (sscanf(buf, "%s", str) != 1)
1380 spin_lock(&drvdata->spinlock);
1381 idx = drvdata->addr_idx;
1382 if (!strcmp(str, "none"))
1383 /* start by clearing context type bits */
1384 drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1385 else if (!strcmp(str, "ctxid")) {
1386 /* 0b01 The trace unit performs a Context ID */
1387 if (drvdata->numcidc) {
1388 drvdata->addr_acc[idx] |= BIT(2);
1389 drvdata->addr_acc[idx] &= ~BIT(3);
1391 } else if (!strcmp(str, "vmid")) {
1392 /* 0b10 The trace unit performs a VMID */
1393 if (drvdata->numvmidc) {
1394 drvdata->addr_acc[idx] &= ~BIT(2);
1395 drvdata->addr_acc[idx] |= BIT(3);
1397 } else if (!strcmp(str, "all")) {
1399 * 0b11 The trace unit performs a Context ID
1400 * comparison and a VMID
1402 if (drvdata->numcidc)
1403 drvdata->addr_acc[idx] |= BIT(2);
1404 if (drvdata->numvmidc)
1405 drvdata->addr_acc[idx] |= BIT(3);
1407 spin_unlock(&drvdata->spinlock);
1410 static DEVICE_ATTR_RW(addr_ctxtype);
/*
 * addr_context_show/addr_context_store - read/set which context ID/VMID
 * comparator (TRCACATRn bits[6:4]) the selected address comparator is
 * linked to.  The store rejects values beyond the larger of numcidc and
 * numvmidc.  NOTE(review): error-return lines are elided in this view.
 */
1412 static ssize_t addr_context_show(struct device *dev,
1413 struct device_attribute *attr,
1418 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1420 spin_lock(&drvdata->spinlock);
1421 idx = drvdata->addr_idx;
1422 /* context ID comparator bits[6:4] */
1423 val = BMVAL(drvdata->addr_acc[idx], 4, 6);
1424 spin_unlock(&drvdata->spinlock);
1425 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1428 static ssize_t addr_context_store(struct device *dev,
1429 struct device_attribute *attr,
1430 const char *buf, size_t size)
1434 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1436 if (kstrtoul(buf, 16, &val))
/* pointless with at most one comparator of either kind */
1438 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1440 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1441 drvdata->numcidc : drvdata->numvmidc))
1444 spin_lock(&drvdata->spinlock);
1445 idx = drvdata->addr_idx;
1446 /* clear context ID comparator bits[6:4] */
1447 drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1448 drvdata->addr_acc[idx] |= (val << 4);
1449 spin_unlock(&drvdata->spinlock);
1452 static DEVICE_ATTR_RW(addr_context);
/*
 * seq_idx_show/seq_idx_store - select which sequencer state's control
 * register the seq_event attribute operates on.  Valid range is
 * [0, nrseqstate - 2] since the last state has no event register.
 * NOTE(review): error-return lines are elided in this view.
 */
1454 static ssize_t seq_idx_show(struct device *dev,
1455 struct device_attribute *attr,
1459 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461 val = drvdata->seq_idx;
1462 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1465 static ssize_t seq_idx_store(struct device *dev,
1466 struct device_attribute *attr,
1467 const char *buf, size_t size)
1470 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1472 if (kstrtoul(buf, 16, &val))
1474 if (val >= drvdata->nrseqstate - 1)
1478 * Use spinlock to ensure index doesn't change while it gets
1479 * dereferenced multiple times within a spinlock block elsewhere.
1481 spin_lock(&drvdata->spinlock);
1482 drvdata->seq_idx = val;
1483 spin_unlock(&drvdata->spinlock);
1486 static DEVICE_ATTR_RW(seq_idx);
/*
 * seq_state_show/seq_state_store - read/set the sequencer's current
 * state; the store rejects values >= nrseqstate.
 * NOTE(review): error-return lines are elided in this view.
 */
1488 static ssize_t seq_state_show(struct device *dev,
1489 struct device_attribute *attr,
1493 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1495 val = drvdata->seq_state;
1496 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1499 static ssize_t seq_state_store(struct device *dev,
1500 struct device_attribute *attr,
1501 const char *buf, size_t size)
1504 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1506 if (kstrtoul(buf, 16, &val))
1508 if (val >= drvdata->nrseqstate)
1511 drvdata->seq_state = val;
1514 static DEVICE_ATTR_RW(seq_state);
/*
 * seq_event_show/seq_event_store - read/set the event control (RST byte)
 * of the sequencer state selected by seq_idx.
 * NOTE(review): error-return lines are elided in this view.
 */
1516 static ssize_t seq_event_show(struct device *dev,
1517 struct device_attribute *attr,
1522 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1524 spin_lock(&drvdata->spinlock);
1525 idx = drvdata->seq_idx;
1526 val = drvdata->seq_ctrl[idx];
1527 spin_unlock(&drvdata->spinlock);
1528 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1531 static ssize_t seq_event_store(struct device *dev,
1532 struct device_attribute *attr,
1533 const char *buf, size_t size)
1537 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1539 if (kstrtoul(buf, 16, &val))
1542 spin_lock(&drvdata->spinlock);
1543 idx = drvdata->seq_idx;
1544 /* RST, bits[7:0] */
1545 drvdata->seq_ctrl[idx] = val & 0xFF;
1546 spin_unlock(&drvdata->spinlock);
1549 static DEVICE_ATTR_RW(seq_event);
/*
 * seq_reset_event_show/seq_reset_event_store - read/set the event that
 * resets the sequencer to state 0 (masked to a valid event selector).
 * The store is only meaningful when a sequencer is implemented.
 * NOTE(review): error-return lines are elided in this view.
 */
1551 static ssize_t seq_reset_event_show(struct device *dev,
1552 struct device_attribute *attr,
1556 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1558 val = drvdata->seq_rst;
1559 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1562 static ssize_t seq_reset_event_store(struct device *dev,
1563 struct device_attribute *attr,
1564 const char *buf, size_t size)
1567 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1569 if (kstrtoul(buf, 16, &val))
1571 if (!(drvdata->nrseqstate))
1574 drvdata->seq_rst = val & ETMv4_EVENT_MASK;
1577 static DEVICE_ATTR_RW(seq_reset_event);
/*
 * cntr_idx_show/cntr_idx_store - select which counter the cntrldvr,
 * cntr_val and cntr_ctrl attributes operate on; bounded by nr_cntr.
 * NOTE(review): error-return lines are elided in this view.
 */
1579 static ssize_t cntr_idx_show(struct device *dev,
1580 struct device_attribute *attr,
1584 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1586 val = drvdata->cntr_idx;
1587 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1590 static ssize_t cntr_idx_store(struct device *dev,
1591 struct device_attribute *attr,
1592 const char *buf, size_t size)
1595 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1597 if (kstrtoul(buf, 16, &val))
1599 if (val >= drvdata->nr_cntr)
1603 * Use spinlock to ensure index doesn't change while it gets
1604 * dereferenced multiple times within a spinlock block elsewhere.
1606 spin_lock(&drvdata->spinlock);
1607 drvdata->cntr_idx = val;
1608 spin_unlock(&drvdata->spinlock);
1611 static DEVICE_ATTR_RW(cntr_idx);
/*
 * cntrldvr_show/cntrldvr_store - read/set the reload value of the counter
 * selected by cntr_idx; capped at ETM_CNTR_MAX_VAL.
 * NOTE(review): error-return lines are elided in this view.
 */
1613 static ssize_t cntrldvr_show(struct device *dev,
1614 struct device_attribute *attr,
1619 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1621 spin_lock(&drvdata->spinlock);
1622 idx = drvdata->cntr_idx;
1623 val = drvdata->cntrldvr[idx];
1624 spin_unlock(&drvdata->spinlock);
1625 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1628 static ssize_t cntrldvr_store(struct device *dev,
1629 struct device_attribute *attr,
1630 const char *buf, size_t size)
1634 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1636 if (kstrtoul(buf, 16, &val))
1638 if (val > ETM_CNTR_MAX_VAL)
1641 spin_lock(&drvdata->spinlock);
1642 idx = drvdata->cntr_idx;
1643 drvdata->cntrldvr[idx] = val;
1644 spin_unlock(&drvdata->spinlock);
1647 static DEVICE_ATTR_RW(cntrldvr);
/*
 * cntr_val_show/cntr_val_store - read/set the current value of the
 * counter selected by cntr_idx; capped at ETM_CNTR_MAX_VAL.
 * NOTE(review): error-return lines are elided in this view.
 */
1649 static ssize_t cntr_val_show(struct device *dev,
1650 struct device_attribute *attr,
1655 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1657 spin_lock(&drvdata->spinlock);
1658 idx = drvdata->cntr_idx;
1659 val = drvdata->cntr_val[idx];
1660 spin_unlock(&drvdata->spinlock);
1661 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1664 static ssize_t cntr_val_store(struct device *dev,
1665 struct device_attribute *attr,
1666 const char *buf, size_t size)
1670 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1672 if (kstrtoul(buf, 16, &val))
1674 if (val > ETM_CNTR_MAX_VAL)
1677 spin_lock(&drvdata->spinlock);
1678 idx = drvdata->cntr_idx;
1679 drvdata->cntr_val[idx] = val;
1680 spin_unlock(&drvdata->spinlock);
1683 static DEVICE_ATTR_RW(cntr_val);
/*
 * cntr_ctrl_show/cntr_ctrl_store - read/set the control register of the
 * counter selected by cntr_idx.
 * NOTE(review): error-return lines are elided in this view.
 */
1685 static ssize_t cntr_ctrl_show(struct device *dev,
1686 struct device_attribute *attr,
1691 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693 spin_lock(&drvdata->spinlock);
1694 idx = drvdata->cntr_idx;
1695 val = drvdata->cntr_ctrl[idx];
1696 spin_unlock(&drvdata->spinlock);
1697 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1700 static ssize_t cntr_ctrl_store(struct device *dev,
1701 struct device_attribute *attr,
1702 const char *buf, size_t size)
1706 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1708 if (kstrtoul(buf, 16, &val))
1711 spin_lock(&drvdata->spinlock);
1712 idx = drvdata->cntr_idx;
1713 drvdata->cntr_ctrl[idx] = val;
1714 spin_unlock(&drvdata->spinlock);
1717 static DEVICE_ATTR_RW(cntr_ctrl);
/*
 * res_idx_show/res_idx_store - select which resource selector register
 * res_ctrl operates on.  Selectors 0 and 1 (pair 0) are reserved, hence
 * the lower bound of 2; the upper bound is nr_resource pairs * 2.
 * NOTE(review): error-return lines are elided in this view.
 */
1719 static ssize_t res_idx_show(struct device *dev,
1720 struct device_attribute *attr,
1724 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1726 val = drvdata->res_idx;
1727 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1730 static ssize_t res_idx_store(struct device *dev,
1731 struct device_attribute *attr,
1732 const char *buf, size_t size)
1735 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1737 if (kstrtoul(buf, 16, &val))
1739 /* Resource selector pair 0 is always implemented and reserved */
1740 if (val < 2 || val >= drvdata->nr_resource * 2)
1744 * Use spinlock to ensure index doesn't change while it gets
1745 * dereferenced multiple times within a spinlock block elsewhere.
1747 spin_lock(&drvdata->spinlock);
1748 drvdata->res_idx = val;
1749 spin_unlock(&drvdata->spinlock);
1752 static DEVICE_ATTR_RW(res_idx);
/*
 * res_ctrl_show/res_ctrl_store - read/set the resource selection control
 * register picked by res_idx.  For odd-numbered selectors the PAIRINV
 * bit is RES0 and is masked off before the value is stored.
 * NOTE(review): the masking lines themselves are elided in this view.
 */
1754 static ssize_t res_ctrl_show(struct device *dev,
1755 struct device_attribute *attr,
1760 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1762 spin_lock(&drvdata->spinlock);
1763 idx = drvdata->res_idx;
1764 val = drvdata->res_ctrl[idx];
1765 spin_unlock(&drvdata->spinlock);
1766 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1769 static ssize_t res_ctrl_store(struct device *dev,
1770 struct device_attribute *attr,
1771 const char *buf, size_t size)
1775 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1777 if (kstrtoul(buf, 16, &val))
1780 spin_lock(&drvdata->spinlock);
1781 idx = drvdata->res_idx;
1782 /* For odd idx pair inversal bit is RES0 */
1784 /* PAIRINV, bit[21] */
1786 drvdata->res_ctrl[idx] = val;
1787 spin_unlock(&drvdata->spinlock);
1790 static DEVICE_ATTR_RW(res_ctrl);
/*
 * ctxid_idx_show/ctxid_idx_store - select which Context ID comparator the
 * ctxid_pid attribute operates on; bounded by numcidc.
 * NOTE(review): error-return lines are elided in this view.
 */
1792 static ssize_t ctxid_idx_show(struct device *dev,
1793 struct device_attribute *attr,
1797 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1799 val = drvdata->ctxid_idx;
1800 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1803 static ssize_t ctxid_idx_store(struct device *dev,
1804 struct device_attribute *attr,
1805 const char *buf, size_t size)
1808 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1810 if (kstrtoul(buf, 16, &val))
1812 if (val >= drvdata->numcidc)
1816 * Use spinlock to ensure index doesn't change while it gets
1817 * dereferenced multiple times within a spinlock block elsewhere.
1819 spin_lock(&drvdata->spinlock);
1820 drvdata->ctxid_idx = val;
1821 spin_unlock(&drvdata->spinlock);
1824 static DEVICE_ATTR_RW(ctxid_idx);
/*
 * ctxid_pid_show/ctxid_pid_store - read/program the PID to match in the
 * selected Context ID comparator.  Userspace supplies a namespace-visible
 * (virtual) PID; it is translated to the kernel PID before programming,
 * and both values are kept so the show reflects what was written.
 * NOTE(review): error-return lines are elided in this view.
 */
1826 static ssize_t ctxid_pid_show(struct device *dev,
1827 struct device_attribute *attr,
1832 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1834 spin_lock(&drvdata->spinlock);
1835 idx = drvdata->ctxid_idx;
1836 val = (unsigned long)drvdata->ctxid_vpid[idx];
1837 spin_unlock(&drvdata->spinlock);
1838 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1841 static ssize_t ctxid_pid_store(struct device *dev,
1842 struct device_attribute *attr,
1843 const char *buf, size_t size)
1846 unsigned long vpid, pid;
1847 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1850 * only implemented when ctxid tracing is enabled, i.e. at least one
1851 * ctxid comparator is implemented and ctxid is greater than 0 bits
1854 if (!drvdata->ctxid_size || !drvdata->numcidc)
1856 if (kstrtoul(buf, 16, &vpid))
/* translate the namespace-local PID to the global kernel PID */
1859 pid = coresight_vpid_to_pid(vpid);
1861 spin_lock(&drvdata->spinlock);
1862 idx = drvdata->ctxid_idx;
1863 drvdata->ctxid_pid[idx] = (u64)pid;
1864 drvdata->ctxid_vpid[idx] = (u64)vpid;
1865 spin_unlock(&drvdata->spinlock);
1868 static DEVICE_ATTR_RW(ctxid_pid);
/*
 * ctxid_masks_show - report both Context ID comparator mask registers
 * (comparators 0-3 in mask0, 4-7 in mask1) as two hex words.
 */
1870 static ssize_t ctxid_masks_show(struct device *dev,
1871 struct device_attribute *attr,
1874 unsigned long val1, val2;
1875 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1877 spin_lock(&drvdata->spinlock);
1878 val1 = drvdata->ctxid_mask0;
1879 val2 = drvdata->ctxid_mask1;
1880 spin_unlock(&drvdata->spinlock);
1881 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1884 static ssize_t ctxid_masks_store(struct device *dev,
1885 struct device_attribute *attr,
1886 const char *buf, size_t size)
1889 unsigned long val1, val2, mask;
1890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1893 * only implemented when ctxid tracing is enabled, i.e. at least one
1894 * ctxid comparator is implemented and ctxid is greater than 0 bits
1897 if (!drvdata->ctxid_size || !drvdata->numcidc)
1899 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1902 spin_lock(&drvdata->spinlock);
1904 * each byte[0..3] controls mask value applied to ctxid
1907 switch (drvdata->numcidc) {
1909 /* COMP0, bits[7:0] */
1910 drvdata->ctxid_mask0 = val1 & 0xFF;
1913 /* COMP1, bits[15:8] */
1914 drvdata->ctxid_mask0 = val1 & 0xFFFF;
1917 /* COMP2, bits[23:16] */
1918 drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
1921 /* COMP3, bits[31:24] */
1922 drvdata->ctxid_mask0 = val1;
1925 /* COMP4, bits[7:0] */
1926 drvdata->ctxid_mask0 = val1;
1927 drvdata->ctxid_mask1 = val2 & 0xFF;
1930 /* COMP5, bits[15:8] */
1931 drvdata->ctxid_mask0 = val1;
1932 drvdata->ctxid_mask1 = val2 & 0xFFFF;
1935 /* COMP6, bits[23:16] */
1936 drvdata->ctxid_mask0 = val1;
1937 drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
1940 /* COMP7, bits[31:24] */
1941 drvdata->ctxid_mask0 = val1;
1942 drvdata->ctxid_mask1 = val2;
1948 * If software sets a mask bit to 1, it must program relevant byte
1949 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1950 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1951 * of ctxid comparator0 value (corresponding to byte 0) register.
1953 mask = drvdata->ctxid_mask0;
1954 for (i = 0; i < drvdata->numcidc; i++) {
1955 /* mask value of corresponding ctxid comparator */
1956 maskbyte = mask & ETMv4_EVENT_MASK;
1958 * each bit corresponds to a byte of respective ctxid comparator
1961 for (j = 0; j < 8; j++) {
1963 drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
1966 /* Select the next ctxid comparator mask value */
1968 /* ctxid comparators[4-7] */
1969 mask = drvdata->ctxid_mask1;
1974 spin_unlock(&drvdata->spinlock);
1977 static DEVICE_ATTR_RW(ctxid_masks);
/*
 * vmid_idx_show/vmid_idx_store - select which VMID comparator the
 * vmid_val attribute operates on; bounded by numvmidc.
 * NOTE(review): error-return lines are elided in this view.
 */
1979 static ssize_t vmid_idx_show(struct device *dev,
1980 struct device_attribute *attr,
1984 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1986 val = drvdata->vmid_idx;
1987 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1990 static ssize_t vmid_idx_store(struct device *dev,
1991 struct device_attribute *attr,
1992 const char *buf, size_t size)
1995 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1997 if (kstrtoul(buf, 16, &val))
1999 if (val >= drvdata->numvmidc)
2003 * Use spinlock to ensure index doesn't change while it gets
2004 * dereferenced multiple times within a spinlock block elsewhere.
2006 spin_lock(&drvdata->spinlock);
2007 drvdata->vmid_idx = val;
2008 spin_unlock(&drvdata->spinlock);
2011 static DEVICE_ATTR_RW(vmid_idx);
/*
 * vmid_val_show/vmid_val_store - read/program the VMID value of the
 * comparator selected by vmid_idx.  The store requires at least one VMID
 * comparator and a non-zero VMID size.
 * NOTE(review): error-return lines are elided; the show reads
 * vmid_idx/vmid_val without taking the spinlock, unlike the store.
 */
2013 static ssize_t vmid_val_show(struct device *dev,
2014 struct device_attribute *attr,
2018 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2020 val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
2021 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2024 static ssize_t vmid_val_store(struct device *dev,
2025 struct device_attribute *attr,
2026 const char *buf, size_t size)
2029 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2032 * only implemented when vmid tracing is enabled, i.e. at least one
2033 * vmid comparator is implemented and at least 8 bit vmid size
2035 if (!drvdata->vmid_size || !drvdata->numvmidc)
2037 if (kstrtoul(buf, 16, &val))
2040 spin_lock(&drvdata->spinlock);
2041 drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
2042 spin_unlock(&drvdata->spinlock);
2045 static DEVICE_ATTR_RW(vmid_val);
/*
 * vmid_masks_show - report both VMID comparator mask registers
 * (comparators 0-3 in mask0, 4-7 in mask1) as two hex words.
 */
2047 static ssize_t vmid_masks_show(struct device *dev,
2048 struct device_attribute *attr, char *buf)
2050 unsigned long val1, val2;
2051 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2053 spin_lock(&drvdata->spinlock);
2054 val1 = drvdata->vmid_mask0;
2055 val2 = drvdata->vmid_mask1;
2056 spin_unlock(&drvdata->spinlock);
2057 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2060 static ssize_t vmid_masks_store(struct device *dev,
2061 struct device_attribute *attr,
2062 const char *buf, size_t size)
2065 unsigned long val1, val2, mask;
2066 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2068 * only implemented when vmid tracing is enabled, i.e. at least one
2069 * vmid comparator is implemented and at least 8 bit vmid size
2071 if (!drvdata->vmid_size || !drvdata->numvmidc)
2073 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2076 spin_lock(&drvdata->spinlock);
2079 * each byte[0..3] controls mask value applied to vmid
2082 switch (drvdata->numvmidc) {
2084 /* COMP0, bits[7:0] */
2085 drvdata->vmid_mask0 = val1 & 0xFF;
2088 /* COMP1, bits[15:8] */
2089 drvdata->vmid_mask0 = val1 & 0xFFFF;
2092 /* COMP2, bits[23:16] */
2093 drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2096 /* COMP3, bits[31:24] */
2097 drvdata->vmid_mask0 = val1;
2100 /* COMP4, bits[7:0] */
2101 drvdata->vmid_mask0 = val1;
2102 drvdata->vmid_mask1 = val2 & 0xFF;
2105 /* COMP5, bits[15:8] */
2106 drvdata->vmid_mask0 = val1;
2107 drvdata->vmid_mask1 = val2 & 0xFFFF;
2110 /* COMP6, bits[23:16] */
2111 drvdata->vmid_mask0 = val1;
2112 drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2115 /* COMP7, bits[31:24] */
2116 drvdata->vmid_mask0 = val1;
2117 drvdata->vmid_mask1 = val2;
2124 * If software sets a mask bit to 1, it must program relevant byte
2125 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2126 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2127 * of vmid comparator0 value (corresponding to byte 0) register.
2129 mask = drvdata->vmid_mask0;
2130 for (i = 0; i < drvdata->numvmidc; i++) {
2131 /* mask value of corresponding vmid comparator */
2132 maskbyte = mask & ETMv4_EVENT_MASK;
2134 * each bit corresponds to a byte of respective vmid comparator
2137 for (j = 0; j < 8; j++) {
2139 drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
2142 /* Select the next vmid comparator mask value */
2144 /* vmid comparators[4-7] */
2145 mask = drvdata->vmid_mask1;
2149 spin_unlock(&drvdata->spinlock);
2152 static DEVICE_ATTR_RW(vmid_masks);
/*
 * cpu_show - report the CPU this trace unit is affine to.
 * NOTE(review): the line assigning val (presumably drvdata->cpu) is
 * elided in this view.
 */
2154 static ssize_t cpu_show(struct device *dev,
2155 struct device_attribute *attr, char *buf)
2158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2161 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2164 static DEVICE_ATTR_RO(cpu);
/* sysfs attributes exposed for configuring the ETMv4 trace unit */
2166 static struct attribute *coresight_etmv4_attrs[] = {
2167 &dev_attr_nr_pe_cmp.attr,
2168 &dev_attr_nr_addr_cmp.attr,
2169 &dev_attr_nr_cntr.attr,
2170 &dev_attr_nr_ext_inp.attr,
2171 &dev_attr_numcidc.attr,
2172 &dev_attr_numvmidc.attr,
2173 &dev_attr_nrseqstate.attr,
2174 &dev_attr_nr_resource.attr,
2175 &dev_attr_nr_ss_cmp.attr,
2176 &dev_attr_reset.attr,
2177 &dev_attr_mode.attr,
2179 &dev_attr_event.attr,
2180 &dev_attr_event_instren.attr,
2181 &dev_attr_event_ts.attr,
2182 &dev_attr_syncfreq.attr,
2183 &dev_attr_cyc_threshold.attr,
2184 &dev_attr_bb_ctrl.attr,
2185 &dev_attr_event_vinst.attr,
2186 &dev_attr_s_exlevel_vinst.attr,
2187 &dev_attr_ns_exlevel_vinst.attr,
2188 &dev_attr_addr_idx.attr,
2189 &dev_attr_addr_instdatatype.attr,
2190 &dev_attr_addr_single.attr,
2191 &dev_attr_addr_range.attr,
2192 &dev_attr_addr_start.attr,
2193 &dev_attr_addr_stop.attr,
2194 &dev_attr_addr_ctxtype.attr,
2195 &dev_attr_addr_context.attr,
2196 &dev_attr_seq_idx.attr,
2197 &dev_attr_seq_state.attr,
2198 &dev_attr_seq_event.attr,
2199 &dev_attr_seq_reset_event.attr,
2200 &dev_attr_cntr_idx.attr,
2201 &dev_attr_cntrldvr.attr,
2202 &dev_attr_cntr_val.attr,
2203 &dev_attr_cntr_ctrl.attr,
2204 &dev_attr_res_idx.attr,
2205 &dev_attr_res_ctrl.attr,
2206 &dev_attr_ctxid_idx.attr,
2207 &dev_attr_ctxid_pid.attr,
2208 &dev_attr_ctxid_masks.attr,
2209 &dev_attr_vmid_idx.attr,
2210 &dev_attr_vmid_val.attr,
2211 &dev_attr_vmid_masks.attr,
/*
 * coresight_simple_func - generate a read-only sysfs show routine that
 * dumps a single management register at the given offset from the trace
 * unit's base address.
 */
2216 #define coresight_simple_func(name, offset) \
2217 static ssize_t name##_show(struct device *_dev, \
2218 struct device_attribute *attr, char *buf) \
2220 struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
2221 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
2222 readl_relaxed(drvdata->base + offset)); \
2224 static DEVICE_ATTR_RO(name)
/* management registers: OS/power/integration lock status, IDs */
2226 coresight_simple_func(trcoslsr, TRCOSLSR);
2227 coresight_simple_func(trcpdcr, TRCPDCR);
2228 coresight_simple_func(trcpdsr, TRCPDSR);
2229 coresight_simple_func(trclsr, TRCLSR);
2230 coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
2231 coresight_simple_func(trcdevid, TRCDEVID);
2232 coresight_simple_func(trcdevtype, TRCDEVTYPE);
2233 coresight_simple_func(trcpidr0, TRCPIDR0);
2234 coresight_simple_func(trcpidr1, TRCPIDR1);
2235 coresight_simple_func(trcpidr2, TRCPIDR2);
2236 coresight_simple_func(trcpidr3, TRCPIDR3);
/* read-only management register dump, grouped under "mgmt" in sysfs */
2238 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2239 &dev_attr_trcoslsr.attr,
2240 &dev_attr_trcpdcr.attr,
2241 &dev_attr_trcpdsr.attr,
2242 &dev_attr_trclsr.attr,
2243 &dev_attr_trcauthstatus.attr,
2244 &dev_attr_trcdevid.attr,
2245 &dev_attr_trcdevtype.attr,
2246 &dev_attr_trcpidr0.attr,
2247 &dev_attr_trcpidr1.attr,
2248 &dev_attr_trcpidr2.attr,
2249 &dev_attr_trcpidr3.attr,
/* ID registers describing the trace unit's implemented features */
2253 coresight_simple_func(trcidr0, TRCIDR0);
2254 coresight_simple_func(trcidr1, TRCIDR1);
2255 coresight_simple_func(trcidr2, TRCIDR2);
2256 coresight_simple_func(trcidr3, TRCIDR3);
2257 coresight_simple_func(trcidr4, TRCIDR4);
2258 coresight_simple_func(trcidr5, TRCIDR5);
2259 /* trcidr[6,7] are reserved */
2260 coresight_simple_func(trcidr8, TRCIDR8);
2261 coresight_simple_func(trcidr9, TRCIDR9);
2262 coresight_simple_func(trcidr10, TRCIDR10);
2263 coresight_simple_func(trcidr11, TRCIDR11);
2264 coresight_simple_func(trcidr12, TRCIDR12);
2265 coresight_simple_func(trcidr13, TRCIDR13);
/* read-only TRCIDRn dump, grouped under "trcidr" in sysfs */
2267 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2268 &dev_attr_trcidr0.attr,
2269 &dev_attr_trcidr1.attr,
2270 &dev_attr_trcidr2.attr,
2271 &dev_attr_trcidr3.attr,
2272 &dev_attr_trcidr4.attr,
2273 &dev_attr_trcidr5.attr,
2274 /* trcidr[6,7] are reserved */
2275 &dev_attr_trcidr8.attr,
2276 &dev_attr_trcidr9.attr,
2277 &dev_attr_trcidr10.attr,
2278 &dev_attr_trcidr11.attr,
2279 &dev_attr_trcidr12.attr,
2280 &dev_attr_trcidr13.attr,
/* the three attribute groups registered with the coresight device */
2284 static const struct attribute_group coresight_etmv4_group = {
2285 .attrs = coresight_etmv4_attrs,
2288 static const struct attribute_group coresight_etmv4_mgmt_group = {
2289 .attrs = coresight_etmv4_mgmt_attrs,
2293 static const struct attribute_group coresight_etmv4_trcidr_group = {
2294 .attrs = coresight_etmv4_trcidr_attrs,
2298 static const struct attribute_group *coresight_etmv4_groups[] = {
2299 &coresight_etmv4_group,
2300 &coresight_etmv4_mgmt_group,
2301 &coresight_etmv4_trcidr_group,
/*
 * etm4_init_arch_data - discover the trace unit's capabilities.
 *
 * Runs on the CPU the ETM is affine to (invoked via an smp call with
 * drvdata as @info).  Reads TRCIDR0..TRCIDR5 and caches every feature
 * field in drvdata so the sysfs handlers and the programming code never
 * have to touch the hardware for capability checks.
 */
2305 static void etm4_init_arch_data(void *info)
2313 struct etmv4_drvdata *drvdata = info;
2315 CS_UNLOCK(drvdata->base);
2317 /* find all capabilities of the tracing unit */
2318 etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
2320 /* INSTP0, bits[2:1] P0 tracing support field */
2321 if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
2322 drvdata->instrp0 = true;
2324 drvdata->instrp0 = false;
2326 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2327 if (BMVAL(etmidr0, 5, 5))
2328 drvdata->trcbb = true;
2330 drvdata->trcbb = false;
2332 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2333 if (BMVAL(etmidr0, 6, 6))
2334 drvdata->trccond = true;
2336 drvdata->trccond = false;
2338 /* TRCCCI, bit[7] Cycle counting instruction bit */
2339 if (BMVAL(etmidr0, 7, 7))
2340 drvdata->trccci = true;
2342 drvdata->trccci = false;
2344 /* RETSTACK, bit[9] Return stack bit */
2345 if (BMVAL(etmidr0, 9, 9))
2346 drvdata->retstack = true;
2348 drvdata->retstack = false;
2350 /* NUMEVENT, bits[11:10] Number of events field */
2351 drvdata->nr_event = BMVAL(etmidr0, 10, 11);
2352 /* QSUPP, bits[16:15] Q element support field */
2353 drvdata->q_support = BMVAL(etmidr0, 15, 16);
2354 /* TSSIZE, bits[28:24] Global timestamp size field */
2355 drvdata->ts_size = BMVAL(etmidr0, 24, 28);
2357 /* base architecture of trace unit */
2358 etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
2360 * TRCARCHMIN, bits[7:4] architecture the minor version number
2361 * TRCARCHMAJ, bits[11:8] architecture major versin number
2363 drvdata->arch = BMVAL(etmidr1, 4, 11);
2365 /* maximum size of resources */
2366 etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
2367 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2368 drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
2369 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2370 drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
2371 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2372 drvdata->ccsize = BMVAL(etmidr2, 25, 28);
2374 etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
2375 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2376 drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
2377 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2378 drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
2379 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2380 drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
2383 * TRCERR, bit[24] whether a trace unit can trace a
2384 * system error exception.
2386 if (BMVAL(etmidr3, 24, 24))
2387 drvdata->trc_error = true;
2389 drvdata->trc_error = false;
2391 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2392 if (BMVAL(etmidr3, 25, 25))
2393 drvdata->syncpr = true;
2395 drvdata->syncpr = false;
2397 /* STALLCTL, bit[26] is stall control implemented? */
2398 if (BMVAL(etmidr3, 26, 26))
2399 drvdata->stallctl = true;
2401 drvdata->stallctl = false;
2403 /* SYSSTALL, bit[27] implementation can support stall control? */
2404 if (BMVAL(etmidr3, 27, 27))
2405 drvdata->sysstall = true;
2407 drvdata->sysstall = false;
2409 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2410 drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
2412 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2413 if (BMVAL(etmidr3, 31, 31))
2414 drvdata->nooverflow = true;
2416 drvdata->nooverflow = false;
2418 /* number of resources trace unit supports */
2419 etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
2420 /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
2421 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2422 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2423 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2425 * NUMRSPAIR, bits[19:16]
2426 * The number of resource pairs conveyed by the HW starts at 0, i.e a
2427 * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
2428 * As such add 1 to the value of NUMRSPAIR for a better representation.
2430 drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
2432 * NUMSSCC, bits[23:20] the number of single-shot
2433 * comparator control for tracing
2435 drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
2436 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2437 drvdata->numcidc = BMVAL(etmidr4, 24, 27);
2438 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2439 drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
2441 etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
2442 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2443 drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
2444 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2445 drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
2446 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2447 if (BMVAL(etmidr5, 22, 22))
2448 drvdata->atbtrig = true;
2450 drvdata->atbtrig = false;
2452 * LPOVERRIDE, bit[23] implementation supports
2453 * low-power state override
2455 if (BMVAL(etmidr5, 23, 23))
2456 drvdata->lpoverride = true;
2458 drvdata->lpoverride = false;
2459 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2460 drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
2461 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2462 drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
2463 CS_LOCK(drvdata->base);
/*
 * etm4_init_default_data - set a sane power-on configuration.
 *
 * Establishes the default trace setup cached in drvdata: trace
 * everything (address range _stext.._etext when a comparator pair
 * exists), timestamping/ctxid/vmid/retstack modes on, all events,
 * counters, sequencer states and resource selectors cleared, and a
 * unique per-CPU trace ID.
 */
2466 static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
2470 drvdata->pe_sel = 0x0;
2471 drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
2472 ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
2474 /* disable all events tracing */
2475 drvdata->eventctrl0 = 0x0;
2476 drvdata->eventctrl1 = 0x0;
2478 /* disable stalling */
2479 drvdata->stall_ctrl = 0x0;
2481 /* disable timestamp event */
2482 drvdata->ts_ctrl = 0x0;
2484 /* enable trace synchronization every 4096 bytes for trace */
2485 if (drvdata->syncpr == false)
2486 drvdata->syncfreq = 0xC;
2489 * enable viewInst to trace everything with start-stop logic in
2492 drvdata->vinst_ctrl |= BIT(0);
2493 /* set initial state of start-stop logic */
2494 if (drvdata->nr_addr_cmp)
2495 drvdata->vinst_ctrl |= BIT(9);
2497 /* no address range filtering for ViewInst */
2498 drvdata->viiectlr = 0x0;
2499 /* no start-stop filtering for ViewInst */
2500 drvdata->vissctlr = 0x0;
2502 /* disable seq events */
2503 for (i = 0; i < drvdata->nrseqstate-1; i++)
2504 drvdata->seq_ctrl[i] = 0x0;
2505 drvdata->seq_rst = 0x0;
2506 drvdata->seq_state = 0x0;
2508 /* disable external input events */
2509 drvdata->ext_inp = 0x0;
2511 for (i = 0; i < drvdata->nr_cntr; i++) {
2512 drvdata->cntrldvr[i] = 0x0;
2513 drvdata->cntr_ctrl[i] = 0x0;
2514 drvdata->cntr_val[i] = 0x0;
2517 /* Resource selector pair 0 is always implemented and reserved */
2518 drvdata->res_idx = 0x2;
2519 for (i = 2; i < drvdata->nr_resource * 2; i++)
2520 drvdata->res_ctrl[i] = 0x0;
2522 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
2523 drvdata->ss_ctrl[i] = 0x0;
2524 drvdata->ss_pe_cmp[i] = 0x0;
/* default to tracing the kernel text section */
2527 if (drvdata->nr_addr_cmp >= 1) {
2528 drvdata->addr_val[0] = (unsigned long)_stext;
2529 drvdata->addr_val[1] = (unsigned long)_etext;
2530 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
2531 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
2534 for (i = 0; i < drvdata->numcidc; i++) {
2535 drvdata->ctxid_pid[i] = 0x0;
2536 drvdata->ctxid_vpid[i] = 0x0;
2539 drvdata->ctxid_mask0 = 0x0;
2540 drvdata->ctxid_mask1 = 0x0;
2542 for (i = 0; i < drvdata->numvmidc; i++)
2543 drvdata->vmid_val[i] = 0x0;
2544 drvdata->vmid_mask0 = 0x0;
2545 drvdata->vmid_mask1 = 0x0;
2548 * A trace ID value of 0 is invalid, so let's start at some
2549 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
2552 drvdata->trcid = 0x20 + drvdata->cpu;
2555 static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2558 unsigned int cpu = (unsigned long)hcpu;
2560 if (!etmdrvdata[cpu])
2563 switch (action & (~CPU_TASKS_FROZEN)) {
2565 spin_lock(&etmdrvdata[cpu]->spinlock);
2566 if (!etmdrvdata[cpu]->os_unlock) {
2567 etm4_os_unlock(etmdrvdata[cpu]);
2568 etmdrvdata[cpu]->os_unlock = true;
2571 if (etmdrvdata[cpu]->enable)
2572 etm4_enable_hw(etmdrvdata[cpu]);
2573 spin_unlock(&etmdrvdata[cpu]->spinlock);
2577 if (etmdrvdata[cpu]->boot_enable &&
2578 !etmdrvdata[cpu]->sticky_enable)
2579 coresight_enable(etmdrvdata[cpu]->csdev);
2583 spin_lock(&etmdrvdata[cpu]->spinlock);
2584 if (etmdrvdata[cpu]->enable)
2585 etm4_disable_hw(etmdrvdata[cpu]);
2586 spin_unlock(&etmdrvdata[cpu]->spinlock);
2593 static struct notifier_block etm4_cpu_notifier = {
2594 .notifier_call = etm4_cpu_callback,
2597 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2601 struct device *dev = &adev->dev;
2602 struct coresight_platform_data *pdata = NULL;
2603 struct etmv4_drvdata *drvdata;
2604 struct resource *res = &adev->res;
2605 struct coresight_desc *desc;
2606 struct device_node *np = adev->dev.of_node;
2608 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
2612 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
2617 pdata = of_get_coresight_platform_data(dev, np);
2619 return PTR_ERR(pdata);
2620 adev->dev.platform_data = pdata;
2623 drvdata->dev = &adev->dev;
2624 dev_set_drvdata(dev, drvdata);
2626 /* Validity for the resource is already checked by the AMBA core */
2627 base = devm_ioremap_resource(dev, res);
2629 return PTR_ERR(base);
2631 drvdata->base = base;
2633 spin_lock_init(&drvdata->spinlock);
2635 drvdata->cpu = pdata ? pdata->cpu : 0;
2638 etmdrvdata[drvdata->cpu] = drvdata;
2640 if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
2641 drvdata->os_unlock = true;
2643 if (smp_call_function_single(drvdata->cpu,
2644 etm4_init_arch_data, drvdata, 1))
2645 dev_err(dev, "ETM arch init failed\n");
2648 register_hotcpu_notifier(&etm4_cpu_notifier);
2652 if (etm4_arch_supported(drvdata->arch) == false) {
2654 goto err_arch_supported;
2656 etm4_init_default_data(drvdata);
2658 pm_runtime_put(&adev->dev);
2660 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
2661 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
2662 desc->ops = &etm4_cs_ops;
2663 desc->pdata = pdata;
2665 desc->groups = coresight_etmv4_groups;
2666 drvdata->csdev = coresight_register(desc);
2667 if (IS_ERR(drvdata->csdev)) {
2668 ret = PTR_ERR(drvdata->csdev);
2669 goto err_coresight_register;
2672 dev_info(dev, "%s initialized\n", (char *)id->data);
2675 coresight_enable(drvdata->csdev);
2676 drvdata->boot_enable = true;
2682 pm_runtime_put(&adev->dev);
2683 err_coresight_register:
2684 if (--etm4_count == 0)
2685 unregister_hotcpu_notifier(&etm4_cpu_notifier);
2689 static struct amba_id etm4_ids[] = {
2690 { /* ETM 4.0 - Qualcomm */
2695 { /* ETM 4.0 - Juno board */
2703 static struct amba_driver etm4x_driver = {
2705 .name = "coresight-etm4x",
2706 .suppress_bind_attrs = true,
2708 .probe = etm4_probe,
2709 .id_table = etm4_ids,
2712 module_amba_driver(etm4x_driver);