/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
19 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/smp.h>
24 #include <linux/sysfs.h>
25 #include <linux/stat.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/cpu.h>
29 #include <linux/coresight.h>
30 #include <linux/amba/bus.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/clk.h>
34 #include <asm/sections.h>
36 #include "coresight-etm.h"
38 static int boot_enable;
39 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
41 /* The number of ETM/PTM currently registered */
43 static struct etm_drvdata *etmdrvdata[NR_CPUS];
45 static inline void etm_writel(struct etm_drvdata *drvdata,
48 if (drvdata->use_cp14) {
49 if (etm_writel_cp14(off, val)) {
51 "invalid CP14 access to ETM reg: %#x", off);
54 writel_relaxed(val, drvdata->base + off);
58 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
62 if (drvdata->use_cp14) {
63 if (etm_readl_cp14(off, &val)) {
65 "invalid CP14 access to ETM reg: %#x", off);
68 val = readl_relaxed(drvdata->base + off);
75 * Memory mapped writes to clear os lock are not supported on some processors
76 * and OS lock must be unlocked before any memory mapped access on such
77 * processors, otherwise memory mapped reads/writes will be invalid.
79 static void etm_os_unlock(void *info)
81 struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
82 /* Writing any value to ETMOSLAR unlocks the trace registers */
83 etm_writel(drvdata, 0x0, ETMOSLAR);
87 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
91 /* Ensure pending cp14 accesses complete before setting pwrdwn */
94 etmcr = etm_readl(drvdata, ETMCR);
95 etmcr |= ETMCR_PWD_DWN;
96 etm_writel(drvdata, etmcr, ETMCR);
99 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
103 etmcr = etm_readl(drvdata, ETMCR);
104 etmcr &= ~ETMCR_PWD_DWN;
105 etm_writel(drvdata, etmcr, ETMCR);
106 /* Ensure pwrup completes before subsequent cp14 accesses */
111 static void etm_set_pwrup(struct etm_drvdata *drvdata)
115 etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
116 etmpdcr |= ETMPDCR_PWD_UP;
117 writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
118 /* Ensure pwrup completes before subsequent cp14 accesses */
123 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
127 /* Ensure pending cp14 accesses complete before clearing pwrup */
130 etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
131 etmpdcr &= ~ETMPDCR_PWD_UP;
132 writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
136 * coresight_timeout_etm - loop until a bit has changed to a specific state.
137 * @drvdata: etm's private data structure.
138 * @offset: address of a register, starting from @addr.
139 * @position: the position of the bit of interest.
140 * @value: the value the bit should have.
142 * Basically the same as @coresight_timeout except for the register access
143 * method where we have to account for CP14 configurations.
145 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
146 * TIMEOUT_US has elapsed, which ever happens first.
149 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
150 int position, int value)
155 for (i = TIMEOUT_US; i > 0; i--) {
156 val = etm_readl(drvdata, offset);
157 /* Waiting on the bit to go from 0 to 1 */
159 if (val & BIT(position))
161 /* Waiting on the bit to go from 1 to 0 */
163 if (!(val & BIT(position)))
168 * Delay is arbitrary - the specification doesn't say how long
169 * we are expected to wait. Extra check required to make sure
170 * we don't wait needlessly on the last iteration.
180 static void etm_set_prog(struct etm_drvdata *drvdata)
184 etmcr = etm_readl(drvdata, ETMCR);
185 etmcr |= ETMCR_ETM_PRG;
186 etm_writel(drvdata, etmcr, ETMCR);
188 * Recommended by spec for cp14 accesses to ensure etmcr write is
189 * complete before polling etmsr
192 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
193 dev_err(drvdata->dev,
194 "%s: timeout observed when probing at offset %#x\n",
199 static void etm_clr_prog(struct etm_drvdata *drvdata)
203 etmcr = etm_readl(drvdata, ETMCR);
204 etmcr &= ~ETMCR_ETM_PRG;
205 etm_writel(drvdata, etmcr, ETMCR);
207 * Recommended by spec for cp14 accesses to ensure etmcr write is
208 * complete before polling etmsr
211 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
212 dev_err(drvdata->dev,
213 "%s: timeout observed when probing at offset %#x\n",
218 static void etm_set_default(struct etm_drvdata *drvdata)
222 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
223 drvdata->enable_event = ETM_HARD_WIRE_RES_A;
225 drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
226 drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
227 drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
228 drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
229 drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
230 drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
231 drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
233 for (i = 0; i < drvdata->nr_cntr; i++) {
234 drvdata->cntr_rld_val[i] = 0x0;
235 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
236 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
237 drvdata->cntr_val[i] = 0x0;
240 drvdata->seq_curr_state = 0x0;
241 drvdata->ctxid_idx = 0x0;
242 for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
243 drvdata->ctxid_pid[i] = 0x0;
244 drvdata->ctxid_vpid[i] = 0x0;
247 drvdata->ctxid_mask = 0x0;
250 static void etm_enable_hw(void *info)
254 struct etm_drvdata *drvdata = info;
256 CS_UNLOCK(drvdata->base);
259 etm_clr_pwrdwn(drvdata);
260 /* Apply power to trace registers */
261 etm_set_pwrup(drvdata);
262 /* Make sure all registers are accessible */
263 etm_os_unlock(drvdata);
265 etm_set_prog(drvdata);
267 etmcr = etm_readl(drvdata, ETMCR);
268 etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
269 etmcr |= drvdata->port_size;
270 etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
271 etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
272 etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
273 etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
274 etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
275 etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
276 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
277 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
278 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
280 for (i = 0; i < drvdata->nr_cntr; i++) {
281 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
282 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
283 etm_writel(drvdata, drvdata->cntr_rld_event[i],
285 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
287 etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
288 etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
289 etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
290 etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
291 etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
292 etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
293 etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
294 for (i = 0; i < drvdata->nr_ext_out; i++)
295 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
296 for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
297 etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
298 etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
299 etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
300 /* No external input selected */
301 etm_writel(drvdata, 0x0, ETMEXTINSELR);
302 etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
303 /* No auxiliary control selected */
304 etm_writel(drvdata, 0x0, ETMAUXCR);
305 etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
306 /* No VMID comparator value selected */
307 etm_writel(drvdata, 0x0, ETMVMIDCVR);
309 /* Ensures trace output is enabled from this ETM */
310 etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
312 etm_clr_prog(drvdata);
313 CS_LOCK(drvdata->base);
315 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
318 static int etm_cpu_id(struct coresight_device *csdev)
320 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
325 static int etm_trace_id(struct coresight_device *csdev)
327 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
331 if (!drvdata->enable)
332 return drvdata->traceid;
333 pm_runtime_get_sync(csdev->dev.parent);
335 spin_lock_irqsave(&drvdata->spinlock, flags);
337 CS_UNLOCK(drvdata->base);
338 trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
339 CS_LOCK(drvdata->base);
341 spin_unlock_irqrestore(&drvdata->spinlock, flags);
342 pm_runtime_put(csdev->dev.parent);
347 static int etm_enable(struct coresight_device *csdev)
349 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
352 pm_runtime_get_sync(csdev->dev.parent);
353 spin_lock(&drvdata->spinlock);
356 * Configure the ETM only if the CPU is online. If it isn't online
357 * hw configuration will take place when 'CPU_STARTING' is received
358 * in @etm_cpu_callback.
360 if (cpu_online(drvdata->cpu)) {
361 ret = smp_call_function_single(drvdata->cpu,
362 etm_enable_hw, drvdata, 1);
367 drvdata->enable = true;
368 drvdata->sticky_enable = true;
370 spin_unlock(&drvdata->spinlock);
372 dev_info(drvdata->dev, "ETM tracing enabled\n");
375 spin_unlock(&drvdata->spinlock);
376 pm_runtime_put(csdev->dev.parent);
380 static void etm_disable_hw(void *info)
383 struct etm_drvdata *drvdata = info;
385 CS_UNLOCK(drvdata->base);
386 etm_set_prog(drvdata);
388 /* Program trace enable to low by using always false event */
389 etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
391 /* Read back sequencer and counters for post trace analysis */
392 drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
394 for (i = 0; i < drvdata->nr_cntr; i++)
395 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
397 etm_set_pwrdwn(drvdata);
398 CS_LOCK(drvdata->base);
400 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
403 static void etm_disable(struct coresight_device *csdev)
405 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
408 * Taking hotplug lock here protects from clocks getting disabled
409 * with tracing being left on (crash scenario) if user disable occurs
410 * after cpu online mask indicates the cpu is offline but before the
411 * DYING hotplug callback is serviced by the ETM driver.
414 spin_lock(&drvdata->spinlock);
417 * Executing etm_disable_hw on the cpu whose ETM is being disabled
418 * ensures that register writes occur when cpu is powered.
420 smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
421 drvdata->enable = false;
423 spin_unlock(&drvdata->spinlock);
425 pm_runtime_put(csdev->dev.parent);
427 dev_info(drvdata->dev, "ETM tracing disabled\n");
430 static const struct coresight_ops_source etm_source_ops = {
431 .cpu_id = etm_cpu_id,
432 .trace_id = etm_trace_id,
433 .enable = etm_enable,
434 .disable = etm_disable,
437 static const struct coresight_ops etm_cs_ops = {
438 .source_ops = &etm_source_ops,
441 static ssize_t nr_addr_cmp_show(struct device *dev,
442 struct device_attribute *attr, char *buf)
445 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
447 val = drvdata->nr_addr_cmp;
448 return sprintf(buf, "%#lx\n", val);
450 static DEVICE_ATTR_RO(nr_addr_cmp);
452 static ssize_t nr_cntr_show(struct device *dev,
453 struct device_attribute *attr, char *buf)
455 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
457 val = drvdata->nr_cntr;
458 return sprintf(buf, "%#lx\n", val);
460 static DEVICE_ATTR_RO(nr_cntr);
462 static ssize_t nr_ctxid_cmp_show(struct device *dev,
463 struct device_attribute *attr, char *buf)
466 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
468 val = drvdata->nr_ctxid_cmp;
469 return sprintf(buf, "%#lx\n", val);
471 static DEVICE_ATTR_RO(nr_ctxid_cmp);
473 static ssize_t etmsr_show(struct device *dev,
474 struct device_attribute *attr, char *buf)
476 unsigned long flags, val;
477 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
479 pm_runtime_get_sync(drvdata->dev);
480 spin_lock_irqsave(&drvdata->spinlock, flags);
481 CS_UNLOCK(drvdata->base);
483 val = etm_readl(drvdata, ETMSR);
485 CS_LOCK(drvdata->base);
486 spin_unlock_irqrestore(&drvdata->spinlock, flags);
487 pm_runtime_put(drvdata->dev);
489 return sprintf(buf, "%#lx\n", val);
491 static DEVICE_ATTR_RO(etmsr);
493 static ssize_t reset_store(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t size)
499 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
501 ret = kstrtoul(buf, 16, &val);
506 spin_lock(&drvdata->spinlock);
507 drvdata->mode = ETM_MODE_EXCLUDE;
509 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
510 drvdata->startstop_ctrl = 0x0;
511 drvdata->addr_idx = 0x0;
512 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
513 drvdata->addr_val[i] = 0x0;
514 drvdata->addr_acctype[i] = 0x0;
515 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
517 drvdata->cntr_idx = 0x0;
519 etm_set_default(drvdata);
520 spin_unlock(&drvdata->spinlock);
525 static DEVICE_ATTR_WO(reset);
527 static ssize_t mode_show(struct device *dev,
528 struct device_attribute *attr, char *buf)
531 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
534 return sprintf(buf, "%#lx\n", val);
537 static ssize_t mode_store(struct device *dev,
538 struct device_attribute *attr,
539 const char *buf, size_t size)
543 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
545 ret = kstrtoul(buf, 16, &val);
549 spin_lock(&drvdata->spinlock);
550 drvdata->mode = val & ETM_MODE_ALL;
552 if (drvdata->mode & ETM_MODE_EXCLUDE)
553 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
555 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
557 if (drvdata->mode & ETM_MODE_CYCACC)
558 drvdata->ctrl |= ETMCR_CYC_ACC;
560 drvdata->ctrl &= ~ETMCR_CYC_ACC;
562 if (drvdata->mode & ETM_MODE_STALL) {
563 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
564 dev_warn(drvdata->dev, "stall mode not supported\n");
568 drvdata->ctrl |= ETMCR_STALL_MODE;
570 drvdata->ctrl &= ~ETMCR_STALL_MODE;
572 if (drvdata->mode & ETM_MODE_TIMESTAMP) {
573 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
574 dev_warn(drvdata->dev, "timestamp not supported\n");
578 drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
580 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
582 if (drvdata->mode & ETM_MODE_CTXID)
583 drvdata->ctrl |= ETMCR_CTXID_SIZE;
585 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
586 spin_unlock(&drvdata->spinlock);
591 spin_unlock(&drvdata->spinlock);
594 static DEVICE_ATTR_RW(mode);
596 static ssize_t trigger_event_show(struct device *dev,
597 struct device_attribute *attr, char *buf)
600 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
602 val = drvdata->trigger_event;
603 return sprintf(buf, "%#lx\n", val);
606 static ssize_t trigger_event_store(struct device *dev,
607 struct device_attribute *attr,
608 const char *buf, size_t size)
612 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
614 ret = kstrtoul(buf, 16, &val);
618 drvdata->trigger_event = val & ETM_EVENT_MASK;
622 static DEVICE_ATTR_RW(trigger_event);
624 static ssize_t enable_event_show(struct device *dev,
625 struct device_attribute *attr, char *buf)
628 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
630 val = drvdata->enable_event;
631 return sprintf(buf, "%#lx\n", val);
634 static ssize_t enable_event_store(struct device *dev,
635 struct device_attribute *attr,
636 const char *buf, size_t size)
640 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
642 ret = kstrtoul(buf, 16, &val);
646 drvdata->enable_event = val & ETM_EVENT_MASK;
650 static DEVICE_ATTR_RW(enable_event);
652 static ssize_t fifofull_level_show(struct device *dev,
653 struct device_attribute *attr, char *buf)
656 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
658 val = drvdata->fifofull_level;
659 return sprintf(buf, "%#lx\n", val);
662 static ssize_t fifofull_level_store(struct device *dev,
663 struct device_attribute *attr,
664 const char *buf, size_t size)
668 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
670 ret = kstrtoul(buf, 16, &val);
674 drvdata->fifofull_level = val;
678 static DEVICE_ATTR_RW(fifofull_level);
680 static ssize_t addr_idx_show(struct device *dev,
681 struct device_attribute *attr, char *buf)
684 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
686 val = drvdata->addr_idx;
687 return sprintf(buf, "%#lx\n", val);
690 static ssize_t addr_idx_store(struct device *dev,
691 struct device_attribute *attr,
692 const char *buf, size_t size)
696 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
698 ret = kstrtoul(buf, 16, &val);
702 if (val >= drvdata->nr_addr_cmp)
706 * Use spinlock to ensure index doesn't change while it gets
707 * dereferenced multiple times within a spinlock block elsewhere.
709 spin_lock(&drvdata->spinlock);
710 drvdata->addr_idx = val;
711 spin_unlock(&drvdata->spinlock);
715 static DEVICE_ATTR_RW(addr_idx);
717 static ssize_t addr_single_show(struct device *dev,
718 struct device_attribute *attr, char *buf)
722 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
724 spin_lock(&drvdata->spinlock);
725 idx = drvdata->addr_idx;
726 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
727 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
728 spin_unlock(&drvdata->spinlock);
732 val = drvdata->addr_val[idx];
733 spin_unlock(&drvdata->spinlock);
735 return sprintf(buf, "%#lx\n", val);
738 static ssize_t addr_single_store(struct device *dev,
739 struct device_attribute *attr,
740 const char *buf, size_t size)
745 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
747 ret = kstrtoul(buf, 16, &val);
751 spin_lock(&drvdata->spinlock);
752 idx = drvdata->addr_idx;
753 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
754 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
755 spin_unlock(&drvdata->spinlock);
759 drvdata->addr_val[idx] = val;
760 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
761 spin_unlock(&drvdata->spinlock);
765 static DEVICE_ATTR_RW(addr_single);
767 static ssize_t addr_range_show(struct device *dev,
768 struct device_attribute *attr, char *buf)
771 unsigned long val1, val2;
772 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
774 spin_lock(&drvdata->spinlock);
775 idx = drvdata->addr_idx;
777 spin_unlock(&drvdata->spinlock);
780 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
781 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
782 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
783 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
784 spin_unlock(&drvdata->spinlock);
788 val1 = drvdata->addr_val[idx];
789 val2 = drvdata->addr_val[idx + 1];
790 spin_unlock(&drvdata->spinlock);
792 return sprintf(buf, "%#lx %#lx\n", val1, val2);
795 static ssize_t addr_range_store(struct device *dev,
796 struct device_attribute *attr,
797 const char *buf, size_t size)
800 unsigned long val1, val2;
801 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
803 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
805 /* Lower address comparator cannot have a higher address value */
809 spin_lock(&drvdata->spinlock);
810 idx = drvdata->addr_idx;
812 spin_unlock(&drvdata->spinlock);
815 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
816 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
817 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
818 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
819 spin_unlock(&drvdata->spinlock);
823 drvdata->addr_val[idx] = val1;
824 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
825 drvdata->addr_val[idx + 1] = val2;
826 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
827 drvdata->enable_ctrl1 |= (1 << (idx/2));
828 spin_unlock(&drvdata->spinlock);
832 static DEVICE_ATTR_RW(addr_range);
834 static ssize_t addr_start_show(struct device *dev,
835 struct device_attribute *attr, char *buf)
839 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
841 spin_lock(&drvdata->spinlock);
842 idx = drvdata->addr_idx;
843 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
844 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
845 spin_unlock(&drvdata->spinlock);
849 val = drvdata->addr_val[idx];
850 spin_unlock(&drvdata->spinlock);
852 return sprintf(buf, "%#lx\n", val);
855 static ssize_t addr_start_store(struct device *dev,
856 struct device_attribute *attr,
857 const char *buf, size_t size)
862 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
864 ret = kstrtoul(buf, 16, &val);
868 spin_lock(&drvdata->spinlock);
869 idx = drvdata->addr_idx;
870 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
871 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
872 spin_unlock(&drvdata->spinlock);
876 drvdata->addr_val[idx] = val;
877 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
878 drvdata->startstop_ctrl |= (1 << idx);
879 drvdata->enable_ctrl1 |= BIT(25);
880 spin_unlock(&drvdata->spinlock);
884 static DEVICE_ATTR_RW(addr_start);
886 static ssize_t addr_stop_show(struct device *dev,
887 struct device_attribute *attr, char *buf)
891 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
893 spin_lock(&drvdata->spinlock);
894 idx = drvdata->addr_idx;
895 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
896 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
897 spin_unlock(&drvdata->spinlock);
901 val = drvdata->addr_val[idx];
902 spin_unlock(&drvdata->spinlock);
904 return sprintf(buf, "%#lx\n", val);
907 static ssize_t addr_stop_store(struct device *dev,
908 struct device_attribute *attr,
909 const char *buf, size_t size)
914 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
916 ret = kstrtoul(buf, 16, &val);
920 spin_lock(&drvdata->spinlock);
921 idx = drvdata->addr_idx;
922 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
923 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
924 spin_unlock(&drvdata->spinlock);
928 drvdata->addr_val[idx] = val;
929 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
930 drvdata->startstop_ctrl |= (1 << (idx + 16));
931 drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
932 spin_unlock(&drvdata->spinlock);
936 static DEVICE_ATTR_RW(addr_stop);
938 static ssize_t addr_acctype_show(struct device *dev,
939 struct device_attribute *attr, char *buf)
942 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
944 spin_lock(&drvdata->spinlock);
945 val = drvdata->addr_acctype[drvdata->addr_idx];
946 spin_unlock(&drvdata->spinlock);
948 return sprintf(buf, "%#lx\n", val);
951 static ssize_t addr_acctype_store(struct device *dev,
952 struct device_attribute *attr,
953 const char *buf, size_t size)
957 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
959 ret = kstrtoul(buf, 16, &val);
963 spin_lock(&drvdata->spinlock);
964 drvdata->addr_acctype[drvdata->addr_idx] = val;
965 spin_unlock(&drvdata->spinlock);
969 static DEVICE_ATTR_RW(addr_acctype);
971 static ssize_t cntr_idx_show(struct device *dev,
972 struct device_attribute *attr, char *buf)
975 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
977 val = drvdata->cntr_idx;
978 return sprintf(buf, "%#lx\n", val);
981 static ssize_t cntr_idx_store(struct device *dev,
982 struct device_attribute *attr,
983 const char *buf, size_t size)
987 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
989 ret = kstrtoul(buf, 16, &val);
993 if (val >= drvdata->nr_cntr)
996 * Use spinlock to ensure index doesn't change while it gets
997 * dereferenced multiple times within a spinlock block elsewhere.
999 spin_lock(&drvdata->spinlock);
1000 drvdata->cntr_idx = val;
1001 spin_unlock(&drvdata->spinlock);
1005 static DEVICE_ATTR_RW(cntr_idx);
1007 static ssize_t cntr_rld_val_show(struct device *dev,
1008 struct device_attribute *attr, char *buf)
1011 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1013 spin_lock(&drvdata->spinlock);
1014 val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1015 spin_unlock(&drvdata->spinlock);
1017 return sprintf(buf, "%#lx\n", val);
1020 static ssize_t cntr_rld_val_store(struct device *dev,
1021 struct device_attribute *attr,
1022 const char *buf, size_t size)
1026 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1028 ret = kstrtoul(buf, 16, &val);
1032 spin_lock(&drvdata->spinlock);
1033 drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1034 spin_unlock(&drvdata->spinlock);
1038 static DEVICE_ATTR_RW(cntr_rld_val);
1040 static ssize_t cntr_event_show(struct device *dev,
1041 struct device_attribute *attr, char *buf)
1044 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1046 spin_lock(&drvdata->spinlock);
1047 val = drvdata->cntr_event[drvdata->cntr_idx];
1048 spin_unlock(&drvdata->spinlock);
1050 return sprintf(buf, "%#lx\n", val);
1053 static ssize_t cntr_event_store(struct device *dev,
1054 struct device_attribute *attr,
1055 const char *buf, size_t size)
1059 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1061 ret = kstrtoul(buf, 16, &val);
1065 spin_lock(&drvdata->spinlock);
1066 drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1067 spin_unlock(&drvdata->spinlock);
1071 static DEVICE_ATTR_RW(cntr_event);
1073 static ssize_t cntr_rld_event_show(struct device *dev,
1074 struct device_attribute *attr, char *buf)
1077 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1079 spin_lock(&drvdata->spinlock);
1080 val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1081 spin_unlock(&drvdata->spinlock);
1083 return sprintf(buf, "%#lx\n", val);
1086 static ssize_t cntr_rld_event_store(struct device *dev,
1087 struct device_attribute *attr,
1088 const char *buf, size_t size)
1092 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1094 ret = kstrtoul(buf, 16, &val);
1098 spin_lock(&drvdata->spinlock);
1099 drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1100 spin_unlock(&drvdata->spinlock);
1104 static DEVICE_ATTR_RW(cntr_rld_event);
1106 static ssize_t cntr_val_show(struct device *dev,
1107 struct device_attribute *attr, char *buf)
1111 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1113 if (!drvdata->enable) {
1114 spin_lock(&drvdata->spinlock);
1115 for (i = 0; i < drvdata->nr_cntr; i++)
1116 ret += sprintf(buf, "counter %d: %x\n",
1117 i, drvdata->cntr_val[i]);
1118 spin_unlock(&drvdata->spinlock);
1122 for (i = 0; i < drvdata->nr_cntr; i++) {
1123 val = etm_readl(drvdata, ETMCNTVRn(i));
1124 ret += sprintf(buf, "counter %d: %x\n", i, val);
1130 static ssize_t cntr_val_store(struct device *dev,
1131 struct device_attribute *attr,
1132 const char *buf, size_t size)
1136 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1138 ret = kstrtoul(buf, 16, &val);
1142 spin_lock(&drvdata->spinlock);
1143 drvdata->cntr_val[drvdata->cntr_idx] = val;
1144 spin_unlock(&drvdata->spinlock);
1148 static DEVICE_ATTR_RW(cntr_val);
1150 static ssize_t seq_12_event_show(struct device *dev,
1151 struct device_attribute *attr, char *buf)
1154 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1156 val = drvdata->seq_12_event;
1157 return sprintf(buf, "%#lx\n", val);
1160 static ssize_t seq_12_event_store(struct device *dev,
1161 struct device_attribute *attr,
1162 const char *buf, size_t size)
1166 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1168 ret = kstrtoul(buf, 16, &val);
1172 drvdata->seq_12_event = val & ETM_EVENT_MASK;
1175 static DEVICE_ATTR_RW(seq_12_event);
1177 static ssize_t seq_21_event_show(struct device *dev,
1178 struct device_attribute *attr, char *buf)
1181 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1183 val = drvdata->seq_21_event;
1184 return sprintf(buf, "%#lx\n", val);
1187 static ssize_t seq_21_event_store(struct device *dev,
1188 struct device_attribute *attr,
1189 const char *buf, size_t size)
1193 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1195 ret = kstrtoul(buf, 16, &val);
1199 drvdata->seq_21_event = val & ETM_EVENT_MASK;
1202 static DEVICE_ATTR_RW(seq_21_event);
1204 static ssize_t seq_23_event_show(struct device *dev,
1205 struct device_attribute *attr, char *buf)
1208 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1210 val = drvdata->seq_23_event;
1211 return sprintf(buf, "%#lx\n", val);
1214 static ssize_t seq_23_event_store(struct device *dev,
1215 struct device_attribute *attr,
1216 const char *buf, size_t size)
1220 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1222 ret = kstrtoul(buf, 16, &val);
1226 drvdata->seq_23_event = val & ETM_EVENT_MASK;
1229 static DEVICE_ATTR_RW(seq_23_event);
1231 static ssize_t seq_31_event_show(struct device *dev,
1232 struct device_attribute *attr, char *buf)
1235 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1237 val = drvdata->seq_31_event;
1238 return sprintf(buf, "%#lx\n", val);
1241 static ssize_t seq_31_event_store(struct device *dev,
1242 struct device_attribute *attr,
1243 const char *buf, size_t size)
1247 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249 ret = kstrtoul(buf, 16, &val);
1253 drvdata->seq_31_event = val & ETM_EVENT_MASK;
1256 static DEVICE_ATTR_RW(seq_31_event);
1258 static ssize_t seq_32_event_show(struct device *dev,
1259 struct device_attribute *attr, char *buf)
1262 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1264 val = drvdata->seq_32_event;
1265 return sprintf(buf, "%#lx\n", val);
1268 static ssize_t seq_32_event_store(struct device *dev,
1269 struct device_attribute *attr,
1270 const char *buf, size_t size)
1274 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1276 ret = kstrtoul(buf, 16, &val);
1280 drvdata->seq_32_event = val & ETM_EVENT_MASK;
1283 static DEVICE_ATTR_RW(seq_32_event);
1285 static ssize_t seq_13_event_show(struct device *dev,
1286 struct device_attribute *attr, char *buf)
1289 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1291 val = drvdata->seq_13_event;
1292 return sprintf(buf, "%#lx\n", val);
1295 static ssize_t seq_13_event_store(struct device *dev,
1296 struct device_attribute *attr,
1297 const char *buf, size_t size)
1301 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1303 ret = kstrtoul(buf, 16, &val);
1307 drvdata->seq_13_event = val & ETM_EVENT_MASK;
1310 static DEVICE_ATTR_RW(seq_13_event);
1312 static ssize_t seq_curr_state_show(struct device *dev,
1313 struct device_attribute *attr, char *buf)
1315 unsigned long val, flags;
1316 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1318 if (!drvdata->enable) {
1319 val = drvdata->seq_curr_state;
1323 pm_runtime_get_sync(drvdata->dev);
1324 spin_lock_irqsave(&drvdata->spinlock, flags);
1326 CS_UNLOCK(drvdata->base);
1327 val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
1328 CS_LOCK(drvdata->base);
1330 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1331 pm_runtime_put(drvdata->dev);
1333 return sprintf(buf, "%#lx\n", val);
1336 static ssize_t seq_curr_state_store(struct device *dev,
1337 struct device_attribute *attr,
1338 const char *buf, size_t size)
1342 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1344 ret = kstrtoul(buf, 16, &val);
1348 if (val > ETM_SEQ_STATE_MAX_VAL)
1351 drvdata->seq_curr_state = val;
1355 static DEVICE_ATTR_RW(seq_curr_state);
1357 static ssize_t ctxid_idx_show(struct device *dev,
1358 struct device_attribute *attr, char *buf)
1361 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1363 val = drvdata->ctxid_idx;
1364 return sprintf(buf, "%#lx\n", val);
1367 static ssize_t ctxid_idx_store(struct device *dev,
1368 struct device_attribute *attr,
1369 const char *buf, size_t size)
1373 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1375 ret = kstrtoul(buf, 16, &val);
1379 if (val >= drvdata->nr_ctxid_cmp)
1383 * Use spinlock to ensure index doesn't change while it gets
1384 * dereferenced multiple times within a spinlock block elsewhere.
1386 spin_lock(&drvdata->spinlock);
1387 drvdata->ctxid_idx = val;
1388 spin_unlock(&drvdata->spinlock);
1392 static DEVICE_ATTR_RW(ctxid_idx);
1394 static ssize_t ctxid_pid_show(struct device *dev,
1395 struct device_attribute *attr, char *buf)
1398 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1400 spin_lock(&drvdata->spinlock);
1401 val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
1402 spin_unlock(&drvdata->spinlock);
1404 return sprintf(buf, "%#lx\n", val);
1407 static ssize_t ctxid_pid_store(struct device *dev,
1408 struct device_attribute *attr,
1409 const char *buf, size_t size)
1412 unsigned long vpid, pid;
1413 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1415 ret = kstrtoul(buf, 16, &vpid);
1419 pid = coresight_vpid_to_pid(vpid);
1421 spin_lock(&drvdata->spinlock);
1422 drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
1423 drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
1424 spin_unlock(&drvdata->spinlock);
1428 static DEVICE_ATTR_RW(ctxid_pid);
1430 static ssize_t ctxid_mask_show(struct device *dev,
1431 struct device_attribute *attr, char *buf)
1434 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1436 val = drvdata->ctxid_mask;
1437 return sprintf(buf, "%#lx\n", val);
1440 static ssize_t ctxid_mask_store(struct device *dev,
1441 struct device_attribute *attr,
1442 const char *buf, size_t size)
1446 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1448 ret = kstrtoul(buf, 16, &val);
1452 drvdata->ctxid_mask = val;
1455 static DEVICE_ATTR_RW(ctxid_mask);
1457 static ssize_t sync_freq_show(struct device *dev,
1458 struct device_attribute *attr, char *buf)
1461 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1463 val = drvdata->sync_freq;
1464 return sprintf(buf, "%#lx\n", val);
1467 static ssize_t sync_freq_store(struct device *dev,
1468 struct device_attribute *attr,
1469 const char *buf, size_t size)
1473 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1475 ret = kstrtoul(buf, 16, &val);
1479 drvdata->sync_freq = val & ETM_SYNC_MASK;
1482 static DEVICE_ATTR_RW(sync_freq);
1484 static ssize_t timestamp_event_show(struct device *dev,
1485 struct device_attribute *attr, char *buf)
1488 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1490 val = drvdata->timestamp_event;
1491 return sprintf(buf, "%#lx\n", val);
1494 static ssize_t timestamp_event_store(struct device *dev,
1495 struct device_attribute *attr,
1496 const char *buf, size_t size)
1500 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1502 ret = kstrtoul(buf, 16, &val);
1506 drvdata->timestamp_event = val & ETM_EVENT_MASK;
1509 static DEVICE_ATTR_RW(timestamp_event);
1511 static ssize_t cpu_show(struct device *dev,
1512 struct device_attribute *attr, char *buf)
1515 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
1521 static DEVICE_ATTR_RO(cpu);
1523 static ssize_t traceid_show(struct device *dev,
1524 struct device_attribute *attr, char *buf)
1526 unsigned long val, flags;
1527 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1529 if (!drvdata->enable) {
1530 val = drvdata->traceid;
1534 pm_runtime_get_sync(drvdata->dev);
1535 spin_lock_irqsave(&drvdata->spinlock, flags);
1536 CS_UNLOCK(drvdata->base);
1538 val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
1540 CS_LOCK(drvdata->base);
1541 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1542 pm_runtime_put(drvdata->dev);
1544 return sprintf(buf, "%#lx\n", val);
1547 static ssize_t traceid_store(struct device *dev,
1548 struct device_attribute *attr,
1549 const char *buf, size_t size)
1553 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1555 ret = kstrtoul(buf, 16, &val);
1559 drvdata->traceid = val & ETM_TRACEID_MASK;
1562 static DEVICE_ATTR_RW(traceid);
1564 static struct attribute *coresight_etm_attrs[] = {
1565 &dev_attr_nr_addr_cmp.attr,
1566 &dev_attr_nr_cntr.attr,
1567 &dev_attr_nr_ctxid_cmp.attr,
1568 &dev_attr_etmsr.attr,
1569 &dev_attr_reset.attr,
1570 &dev_attr_mode.attr,
1571 &dev_attr_trigger_event.attr,
1572 &dev_attr_enable_event.attr,
1573 &dev_attr_fifofull_level.attr,
1574 &dev_attr_addr_idx.attr,
1575 &dev_attr_addr_single.attr,
1576 &dev_attr_addr_range.attr,
1577 &dev_attr_addr_start.attr,
1578 &dev_attr_addr_stop.attr,
1579 &dev_attr_addr_acctype.attr,
1580 &dev_attr_cntr_idx.attr,
1581 &dev_attr_cntr_rld_val.attr,
1582 &dev_attr_cntr_event.attr,
1583 &dev_attr_cntr_rld_event.attr,
1584 &dev_attr_cntr_val.attr,
1585 &dev_attr_seq_12_event.attr,
1586 &dev_attr_seq_21_event.attr,
1587 &dev_attr_seq_23_event.attr,
1588 &dev_attr_seq_31_event.attr,
1589 &dev_attr_seq_32_event.attr,
1590 &dev_attr_seq_13_event.attr,
1591 &dev_attr_seq_curr_state.attr,
1592 &dev_attr_ctxid_idx.attr,
1593 &dev_attr_ctxid_pid.attr,
1594 &dev_attr_ctxid_mask.attr,
1595 &dev_attr_sync_freq.attr,
1596 &dev_attr_timestamp_event.attr,
1597 &dev_attr_traceid.attr,
/*
 * Generate a read-only sysfs attribute that dumps a single memory-mapped
 * ETM register as hex.  Used for the management register group below.
 */
#define coresight_simple_func(name, offset)				\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);	\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
static DEVICE_ATTR_RO(name)
/* Raw register dump attributes, one per management register. */
coresight_simple_func(etmccr, ETMCCR);
coresight_simple_func(etmccer, ETMCCER);
coresight_simple_func(etmscr, ETMSCR);
coresight_simple_func(etmidr, ETMIDR);
coresight_simple_func(etmcr, ETMCR);
coresight_simple_func(etmtraceidr, ETMTRACEIDR);
coresight_simple_func(etmteevr, ETMTEEVR);
coresight_simple_func(etmtssvr, ETMTSSCR);
coresight_simple_func(etmtecr1, ETMTECR1);
coresight_simple_func(etmtecr2, ETMTECR2);
1623 static struct attribute *coresight_etm_mgmt_attrs[] = {
1624 &dev_attr_etmccr.attr,
1625 &dev_attr_etmccer.attr,
1626 &dev_attr_etmscr.attr,
1627 &dev_attr_etmidr.attr,
1628 &dev_attr_etmcr.attr,
1629 &dev_attr_etmtraceidr.attr,
1630 &dev_attr_etmteevr.attr,
1631 &dev_attr_etmtssvr.attr,
1632 &dev_attr_etmtecr1.attr,
1633 &dev_attr_etmtecr2.attr,
1637 static const struct attribute_group coresight_etm_group = {
1638 .attrs = coresight_etm_attrs,
1642 static const struct attribute_group coresight_etm_mgmt_group = {
1643 .attrs = coresight_etm_mgmt_attrs,
1647 static const struct attribute_group *coresight_etm_groups[] = {
1648 &coresight_etm_group,
1649 &coresight_etm_mgmt_group,
1653 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1656 unsigned int cpu = (unsigned long)hcpu;
1658 if (!etmdrvdata[cpu])
1661 switch (action & (~CPU_TASKS_FROZEN)) {
1663 spin_lock(&etmdrvdata[cpu]->spinlock);
1664 if (!etmdrvdata[cpu]->os_unlock) {
1665 etm_os_unlock(etmdrvdata[cpu]);
1666 etmdrvdata[cpu]->os_unlock = true;
1669 if (etmdrvdata[cpu]->enable)
1670 etm_enable_hw(etmdrvdata[cpu]);
1671 spin_unlock(&etmdrvdata[cpu]->spinlock);
1675 if (etmdrvdata[cpu]->boot_enable &&
1676 !etmdrvdata[cpu]->sticky_enable)
1677 coresight_enable(etmdrvdata[cpu]->csdev);
1681 spin_lock(&etmdrvdata[cpu]->spinlock);
1682 if (etmdrvdata[cpu]->enable)
1683 etm_disable_hw(etmdrvdata[cpu]);
1684 spin_unlock(&etmdrvdata[cpu]->spinlock);
1691 static struct notifier_block etm_cpu_notifier = {
1692 .notifier_call = etm_cpu_callback,
1695 static bool etm_arch_supported(u8 arch)
1712 static void etm_init_arch_data(void *info)
1716 struct etm_drvdata *drvdata = info;
1718 CS_UNLOCK(drvdata->base);
1720 /* First dummy read */
1721 (void)etm_readl(drvdata, ETMPDSR);
1722 /* Provide power to ETM: ETMPDCR[3] == 1 */
1723 etm_set_pwrup(drvdata);
1725 * Clear power down bit since when this bit is set writes to
1726 * certain registers might be ignored.
1728 etm_clr_pwrdwn(drvdata);
1730 * Set prog bit. It will be set from reset but this is included to
1733 etm_set_prog(drvdata);
1735 /* Find all capabilities */
1736 etmidr = etm_readl(drvdata, ETMIDR);
1737 drvdata->arch = BMVAL(etmidr, 4, 11);
1738 drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
1740 drvdata->etmccer = etm_readl(drvdata, ETMCCER);
1741 etmccr = etm_readl(drvdata, ETMCCR);
1742 drvdata->etmccr = etmccr;
1743 drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
1744 drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
1745 drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
1746 drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
1747 drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
1749 etm_set_pwrdwn(drvdata);
1750 etm_clr_pwrup(drvdata);
1751 CS_LOCK(drvdata->base);
1754 static void etm_init_default_data(struct etm_drvdata *drvdata)
1757 * A trace ID of value 0 is invalid, so let's start at some
1758 * random value that fits in 7 bits and will be just as good.
1760 static int etm3x_traceid = 0x10;
1762 u32 flags = (1 << 0 | /* instruction execute*/
1763 3 << 3 | /* ARM instruction */
1764 0 << 5 | /* No data value comparison */
1765 0 << 7 | /* No exact mach */
1766 0 << 8 | /* Ignore context ID */
1767 0 << 10); /* Security ignored */
1770 * Initial configuration only - guarantees sources handled by
1771 * this driver have a unique ID at startup time but not between
1772 * all other types of sources. For that we lean on the core
1775 drvdata->traceid = etm3x_traceid++;
1776 drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
1777 drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
1778 if (drvdata->nr_addr_cmp >= 2) {
1779 drvdata->addr_val[0] = (u32) _stext;
1780 drvdata->addr_val[1] = (u32) _etext;
1781 drvdata->addr_acctype[0] = flags;
1782 drvdata->addr_acctype[1] = flags;
1783 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
1784 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
1787 etm_set_default(drvdata);
1790 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1794 struct device *dev = &adev->dev;
1795 struct coresight_platform_data *pdata = NULL;
1796 struct etm_drvdata *drvdata;
1797 struct resource *res = &adev->res;
1798 struct coresight_desc *desc;
1799 struct device_node *np = adev->dev.of_node;
1801 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
1805 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1810 pdata = of_get_coresight_platform_data(dev, np);
1812 return PTR_ERR(pdata);
1814 adev->dev.platform_data = pdata;
1815 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
1818 drvdata->dev = &adev->dev;
1819 dev_set_drvdata(dev, drvdata);
1821 /* Validity for the resource is already checked by the AMBA core */
1822 base = devm_ioremap_resource(dev, res);
1824 return PTR_ERR(base);
1826 drvdata->base = base;
1828 spin_lock_init(&drvdata->spinlock);
1830 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
1831 if (!IS_ERR(drvdata->atclk)) {
1832 ret = clk_prepare_enable(drvdata->atclk);
1837 drvdata->cpu = pdata ? pdata->cpu : 0;
1840 etmdrvdata[drvdata->cpu] = drvdata;
1842 if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
1843 drvdata->os_unlock = true;
1845 if (smp_call_function_single(drvdata->cpu,
1846 etm_init_arch_data, drvdata, 1))
1847 dev_err(dev, "ETM arch init failed\n");
1850 register_hotcpu_notifier(&etm_cpu_notifier);
1854 if (etm_arch_supported(drvdata->arch) == false) {
1856 goto err_arch_supported;
1858 etm_init_default_data(drvdata);
1860 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1861 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1862 desc->ops = &etm_cs_ops;
1863 desc->pdata = pdata;
1865 desc->groups = coresight_etm_groups;
1866 drvdata->csdev = coresight_register(desc);
1867 if (IS_ERR(drvdata->csdev)) {
1868 ret = PTR_ERR(drvdata->csdev);
1869 goto err_arch_supported;
1872 pm_runtime_put(&adev->dev);
1873 dev_info(dev, "%s initialized\n", (char *)id->data);
1876 coresight_enable(drvdata->csdev);
1877 drvdata->boot_enable = true;
1883 if (--etm_count == 0)
1884 unregister_hotcpu_notifier(&etm_cpu_notifier);
1889 static int etm_runtime_suspend(struct device *dev)
1891 struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1893 if (drvdata && !IS_ERR(drvdata->atclk))
1894 clk_disable_unprepare(drvdata->atclk);
1899 static int etm_runtime_resume(struct device *dev)
1901 struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1903 if (drvdata && !IS_ERR(drvdata->atclk))
1904 clk_prepare_enable(drvdata->atclk);
1910 static const struct dev_pm_ops etm_dev_pm_ops = {
1911 SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
1914 static struct amba_id etm_ids[] = {
1935 { /* PTM 1.1 Qualcomm */
1943 static struct amba_driver etm_driver = {
1945 .name = "coresight-etm3x",
1946 .owner = THIS_MODULE,
1947 .pm = &etm_dev_pm_ops,
1948 .suppress_bind_attrs = true,
1951 .id_table = etm_ids,
1954 module_amba_driver(etm_driver);
1956 MODULE_LICENSE("GPL v2");
1957 MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");