coresight: remove the extra spaces
firefly-linux-kernel-4.4.55.git: drivers/coresight/coresight-etm3x.c
1 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/io.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/smp.h>
24 #include <linux/sysfs.h>
25 #include <linux/stat.h>
26 #include <linux/clk.h>
27 #include <linux/cpu.h>
28 #include <linux/of.h>
29 #include <linux/coresight.h>
30 #include <linux/amba/bus.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <asm/sections.h>
34
35 #include "coresight-etm.h"
36
37 #ifdef CONFIG_CORESIGHT_SOURCE_ETM_DEFAULT_ENABLE
38 static int boot_enable = 1;
39 #else
40 static int boot_enable;
41 #endif
42 module_param_named(
43         boot_enable, boot_enable, int, S_IRUGO
44 );
45
46 /* The number of ETM/PTM currently registered */
47 static int etm_count;
48 static struct etm_drvdata *etmdrvdata[NR_CPUS];
49
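/*
 * Register accessors: go through the CP14 interface when memory mapped
 * access to the ETM is not usable (drvdata->use_cp14), otherwise use the
 * memory mapped window at drvdata->base.
 */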
50 static inline void etm_writel(struct etm_drvdata *drvdata,
51                               u32 val, u32 off)
52 {
53         if (drvdata->use_cp14) {
54                 if (etm_writel_cp14(off, val)) {
55                         dev_err(drvdata->dev,
56                                 "invalid CP14 access to ETM reg: %#x", off);
57                 }
58         } else {
59                 writel_relaxed(val, drvdata->base + off);
60         }
61 }
62
63 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
64 {
65         u32 val;
66
67         if (drvdata->use_cp14) {
68                 if (etm_readl_cp14(off, &val)) {
69                         dev_err(drvdata->dev,
70                                 "invalid CP14 access to ETM reg: %#x", off);
71                 }
72         } else {
73                 val = readl_relaxed(drvdata->base + off);
74         }
75
76         return val;
77 }
78
79 /*
80  * Memory mapped writes to clear the OS lock are not supported on some
81  * processors; on those the OS lock must be cleared before any memory mapped
82  * access is made, otherwise memory mapped reads/writes will be invalid.
83  */
84 static void etm_os_unlock(void *info)
85 {
86         struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
87         /* Writing any value to ETMOSLAR unlocks the trace registers */
88         etm_writel(drvdata, 0x0, ETMOSLAR);
89         isb();
90 }
91
92 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
93 {
94         u32 etmcr;
95
96         /* Ensure pending cp14 accesses complete before setting pwrdwn */
97         mb();
98         isb();
99         etmcr = etm_readl(drvdata, ETMCR);
100         etmcr |= ETMCR_PWD_DWN;
101         etm_writel(drvdata, etmcr, ETMCR);
102 }
103
104 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
105 {
106         u32 etmcr;
107
108         etmcr = etm_readl(drvdata, ETMCR);
109         etmcr &= ~ETMCR_PWD_DWN;
110         etm_writel(drvdata, etmcr, ETMCR);
111         /* Ensure pwrup completes before subsequent cp14 accesses */
112         mb();
113         isb();
114 }
115
116 static void etm_set_pwrup(struct etm_drvdata *drvdata)
117 {
118         u32 etmpdcr;
119
120         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
121         etmpdcr |= ETMPDCR_PWD_UP;
122         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
123         /* Ensure pwrup completes before subsequent cp14 accesses */
124         mb();
125         isb();
126 }
127
128 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
129 {
130         u32 etmpdcr;
131
132         /* Ensure pending cp14 accesses complete before clearing pwrup */
133         mb();
134         isb();
135         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
136         etmpdcr &= ~ETMPDCR_PWD_UP;
137         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
138 }
139
140 /**
141  * coresight_timeout_etm - loop until a bit has changed to a specific state.
142  * @drvdata: etm's private data structure.
143  * @offset: offset of the register to access within the ETM register map.
144  * @position: the position of the bit of interest.
145  * @value: the value the bit should have.
146  *
147  * Basically the same as @coresight_timeout except for the register access
148  * method where we have to account for CP14 configurations.
149  *
150  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
151  * TIMEOUT_US has elapsed, whichever happens first.
152  */
153
154 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
155                                   int position, int value)
156 {
157         int i;
158         u32 val;
159
160         for (i = TIMEOUT_US; i > 0; i--) {
161                 val = etm_readl(drvdata, offset);
162                 /* Waiting on the bit to go from 0 to 1 */
163                 if (value) {
164                         if (val & BIT(position))
165                                 return 0;
166                 /* Waiting on the bit to go from 1 to 0 */
167                 } else {
168                         if (!(val & BIT(position)))
169                                 return 0;
170                 }
171
172                 /*
173                  * Delay is arbitrary - the specification doesn't say how long
174                  * we are expected to wait.  Extra check required to make sure
175                  * we don't wait needlessly on the last iteration.
176                  */
177                 if (i - 1)
178                         udelay(1);
179         }
180
181         return -EAGAIN;
182 }
183
184
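/* Set the programming bit in ETMCR and wait for ETMSR to reflect it. */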
185 static void etm_set_prog(struct etm_drvdata *drvdata)
186 {
187         u32 etmcr;
188
189         etmcr = etm_readl(drvdata, ETMCR);
190         etmcr |= ETMCR_ETM_PRG;
191         etm_writel(drvdata, etmcr, ETMCR);
192         /*
193          * Recommended by spec for cp14 accesses to ensure etmcr write is
194          * complete before polling etmsr
195          */
196         isb();
197         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
198                 dev_err(drvdata->dev,
199                         "timeout observed when probing at offset %#x\n", ETMSR);
200         }
201 }
202
203 static void etm_clr_prog(struct etm_drvdata *drvdata)
204 {
205         u32 etmcr;
206
207         etmcr = etm_readl(drvdata, ETMCR);
208         etmcr &= ~ETMCR_ETM_PRG;
209         etm_writel(drvdata, etmcr, ETMCR);
210         /*
211          * Recommended by spec for cp14 accesses to ensure etmcr write is
212          * complete before polling etmsr
213          */
214         isb();
215         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
216                 dev_err(drvdata->dev,
217                         "timeout observed when probing at offset %#x\n", ETMSR);
218         }
219 }
220
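/*
 * Reset the programmable resources to a sane default: events parked on
 * ETM_DEFAULT_EVENT_VAL, counters, sequencer state and context ID
 * comparators cleared.
 */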
221 static void etm_set_default(struct etm_drvdata *drvdata)
222 {
223         int i;
224
225         drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
226         drvdata->enable_event = ETM_HARD_WIRE_RES_A;
227
228         drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
229         drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
230         drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
231         drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
232         drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
233         drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
234         drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
235
236         for (i = 0; i < drvdata->nr_cntr; i++) {
237                 drvdata->cntr_rld_val[i] = 0x0;
238                 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
239                 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
240                 drvdata->cntr_val[i] = 0x0;
241         }
242
243         drvdata->seq_curr_state = 0x0;
244         drvdata->ctxid_idx = 0x0;
245         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
246                 drvdata->ctxid_val[i] = 0x0;
247         drvdata->ctxid_mask = 0x0;
248 }
249
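/*
 * Program the ETM with the configuration held in drvdata and take it out
 * of programming mode.  Runs on the CPU that owns this ETM, either via
 * smp_call_function_single() or from the hotplug notifier.
 */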
250 static void etm_enable_hw(void *info)
251 {
252         int i;
253         u32 etmcr;
254         struct etm_drvdata *drvdata = info;
255
256         CS_UNLOCK(drvdata->base);
257
258         /* Turn engine on */
259         etm_clr_pwrdwn(drvdata);
260         /* Apply power to trace registers */
261         etm_set_pwrup(drvdata);
262         /* Make sure all registers are accessible */
263         etm_os_unlock(drvdata);
264
265         etm_set_prog(drvdata);
266
267         etmcr = etm_readl(drvdata, ETMCR);
268         etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
269         etmcr |= drvdata->port_size;
270         etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
271         etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
272         etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
273         etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
274         etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
275         etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
276         for (i = 0; i < drvdata->nr_addr_cmp; i++) {
277                 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
278                 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
279         }
280         for (i = 0; i < drvdata->nr_cntr; i++) {
281                 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
282                 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
283                 etm_writel(drvdata, drvdata->cntr_rld_event[i],
284                            ETMCNTRLDEVRn(i));
285                 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
286         }
287         etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
288         etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
289         etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
290         etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
291         etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
292         etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
293         etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
294         for (i = 0; i < drvdata->nr_ext_out; i++)
295                 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
296         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
297                 etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
298         etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
299         etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
300         /* No external input selected */
301         etm_writel(drvdata, 0x0, ETMEXTINSELR);
302         etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
303         /* No auxiliary control selected */
304         etm_writel(drvdata, 0x0, ETMAUXCR);
305         etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
306         /* No VMID comparator value selected */
307         etm_writel(drvdata, 0x0, ETMVMIDCVR);
308
309         /* Ensures trace output is enabled from this ETM */
310         etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
311
312         etm_clr_prog(drvdata);
313         CS_LOCK(drvdata->base);
314
315         dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
316 }
317
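/*
 * Read the trace ID without touching clocks or locks; only for callers
 * that have already taken care of both (see status_show()).
 */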
318 static int etm_trace_id_simple(struct etm_drvdata *drvdata)
319 {
320         if (!drvdata->enable)
321                 return drvdata->traceid;
322
323         return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
324 }
325
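/*
 * coresight source op: return the trace ID, reading it back from the
 * hardware when the ETM is enabled.
 */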
326 static int etm_trace_id(struct coresight_device *csdev)
327 {
328         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
329         unsigned long flags;
330         int trace_id = -1;
331
332         if (!drvdata->enable)
333                 return drvdata->traceid;
334
335         if (clk_prepare_enable(drvdata->clk))
336                 goto out;
337
338         spin_lock_irqsave(&drvdata->spinlock, flags);
339
340         CS_UNLOCK(drvdata->base);
341         trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
342         CS_LOCK(drvdata->base);
343
344         spin_unlock_irqrestore(&drvdata->spinlock, flags);
345         clk_disable_unprepare(drvdata->clk);
346 out:
347         return trace_id;
348 }
349
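/*
 * coresight source op: enable tracing.  The hardware is only programmed
 * here if the CPU is online; otherwise programming is deferred to the
 * CPU_STARTING hotplug notification.
 */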
350 static int etm_enable(struct coresight_device *csdev)
351 {
352         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
353         int ret;
354
355         ret = clk_prepare_enable(drvdata->clk);
356         if (ret)
357                 goto err_clk;
358
359         spin_lock(&drvdata->spinlock);
360
361         /*
362          * Configure the ETM only if the CPU is online.  If it isn't online
363          * hw configuration will take place when 'CPU_STARTING' is received
364          * in @etm_cpu_callback.
365          */
366         if (cpu_online(drvdata->cpu)) {
367                 ret = smp_call_function_single(drvdata->cpu,
368                                                etm_enable_hw, drvdata, 1);
369                 if (ret)
370                         goto err;
371         }
372
373         drvdata->enable = true;
374         drvdata->sticky_enable = true;
375
376         spin_unlock(&drvdata->spinlock);
377
378         dev_info(drvdata->dev, "ETM tracing enabled\n");
379         return 0;
380 err:
381         spin_unlock(&drvdata->spinlock);
382         clk_disable_unprepare(drvdata->clk);
383 err_clk:
384         return ret;
385 }
386
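/*
 * Stop tracing by programming an always-false trace enable event, then
 * save the sequencer and counter state for post-trace analysis.
 */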
387 static void etm_disable_hw(void *info)
388 {
389         int i;
390         struct etm_drvdata *drvdata = info;
391
392         CS_UNLOCK(drvdata->base);
393         etm_set_prog(drvdata);
394
395         /* Program trace enable to low by using an always-false event */
396         etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
397
398         /* Read back sequencer and counters for post trace analysis */
399         drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
400
401         for (i = 0; i < drvdata->nr_cntr; i++)
402                 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
403
404         etm_set_pwrdwn(drvdata);
405         CS_LOCK(drvdata->base);
406
407         dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
408 }
409
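/* coresight source op: disable tracing on the CPU this ETM is bound to. */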
410 static void etm_disable(struct coresight_device *csdev)
411 {
412         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
413
414         /*
415          * Taking hotplug lock here protects from clocks getting disabled
416          * with tracing being left on (crash scenario) if user disable occurs
417          * after cpu online mask indicates the cpu is offline but before the
418          * DYING hotplug callback is serviced by the ETM driver.
419          */
420         get_online_cpus();
421         spin_lock(&drvdata->spinlock);
422
423         /*
424          * Executing etm_disable_hw on the cpu whose ETM is being disabled
425          * ensures that register writes occur when cpu is powered.
426          */
427         smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
428         drvdata->enable = false;
429
430         spin_unlock(&drvdata->spinlock);
431         put_online_cpus();
432
433         clk_disable_unprepare(drvdata->clk);
434
435         dev_info(drvdata->dev, "ETM tracing disabled\n");
436 }
437
438 static const struct coresight_ops_source etm_source_ops = {
439         .trace_id       = etm_trace_id,
440         .enable         = etm_enable,
441         .disable        = etm_disable,
442 };
443
444 static const struct coresight_ops etm_cs_ops = {
445         .source_ops     = &etm_source_ops,
446 };
447
448 static ssize_t nr_addr_cmp_show(struct device *dev,
449                                 struct device_attribute *attr, char *buf)
450 {
451         unsigned long val;
452         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
453
454         val = drvdata->nr_addr_cmp;
455         return sprintf(buf, "%#lx\n", val);
456 }
457 static DEVICE_ATTR_RO(nr_addr_cmp);
458
459 static ssize_t nr_cntr_show(struct device *dev,
460                             struct device_attribute *attr, char *buf)
461 {
        unsigned long val;
462         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
463
464         val = drvdata->nr_cntr;
465         return sprintf(buf, "%#lx\n", val);
466 }
467 static DEVICE_ATTR_RO(nr_cntr);
468
469 static ssize_t nr_ctxid_cmp_show(struct device *dev,
470                                  struct device_attribute *attr, char *buf)
471 {
472         unsigned long val;
473         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
474
475         val = drvdata->nr_ctxid_cmp;
476         return sprintf(buf, "%#lx\n", val);
477 }
478 static DEVICE_ATTR_RO(nr_ctxid_cmp);
479
480 static ssize_t etmsr_show(struct device *dev,
481                           struct device_attribute *attr, char *buf)
482 {
483         int ret;
484         unsigned long flags, val;
485         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
486
487         ret = clk_prepare_enable(drvdata->clk);
488         if (ret)
489                 return ret;
490
491         spin_lock_irqsave(&drvdata->spinlock, flags);
492         CS_UNLOCK(drvdata->base);
493
494         val = etm_readl(drvdata, ETMSR);
495
496         CS_LOCK(drvdata->base);
497         spin_unlock_irqrestore(&drvdata->spinlock, flags);
498         clk_disable_unprepare(drvdata->clk);
499
500         return sprintf(buf, "%#lx\n", val);
501 }
502 static DEVICE_ATTR_RO(etmsr);
503
504 static ssize_t reset_store(struct device *dev,
505                            struct device_attribute *attr,
506                            const char *buf, size_t size)
507 {
508         int i, ret;
509         unsigned long val;
510         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
511
512         ret = kstrtoul(buf, 16, &val);
513         if (ret)
514                 return ret;
515
516         if (val) {
517                 spin_lock(&drvdata->spinlock);
518                 drvdata->mode = ETM_MODE_EXCLUDE;
519                 drvdata->ctrl = 0x0;
520                 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
521                 drvdata->startstop_ctrl = 0x0;
522                 drvdata->addr_idx = 0x0;
523                 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
524                         drvdata->addr_val[i] = 0x0;
525                         drvdata->addr_acctype[i] = 0x0;
526                         drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
527                 }
528                 drvdata->cntr_idx = 0x0;
529
530                 etm_set_default(drvdata);
531                 spin_unlock(&drvdata->spinlock);
532         }
533
534         return size;
535 }
536 static DEVICE_ATTR_WO(reset);
537
538 static ssize_t mode_show(struct device *dev,
539                          struct device_attribute *attr, char *buf)
540 {
541         unsigned long val;
542         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
543
544         val = drvdata->mode;
545         return sprintf(buf, "%#lx\n", val);
546 }
547
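/*
 * Translate the user supplied ETM_MODE_* flags into ETMCR/ETMTECR1 bits,
 * rejecting stall and timestamp modes the hardware does not advertise in
 * ETMCCR/ETMCCER.
 */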
548 static ssize_t mode_store(struct device *dev,
549                           struct device_attribute *attr,
550                           const char *buf, size_t size)
551 {
552         int ret;
553         unsigned long val;
554         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
555
556         ret = kstrtoul(buf, 16, &val);
557         if (ret)
558                 return ret;
559
560         spin_lock(&drvdata->spinlock);
561         drvdata->mode = val & ETM_MODE_ALL;
562
563         if (drvdata->mode & ETM_MODE_EXCLUDE)
564                 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
565         else
566                 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
567
568         if (drvdata->mode & ETM_MODE_CYCACC)
569                 drvdata->ctrl |= ETMCR_CYC_ACC;
570         else
571                 drvdata->ctrl &= ~ETMCR_CYC_ACC;
572
573         if (drvdata->mode & ETM_MODE_STALL) {
574                 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
575                         dev_warn(drvdata->dev, "stall mode not supported\n");
576                         ret = -EINVAL;
577                         goto err_unlock;
578                 }
579                 drvdata->ctrl |= ETMCR_STALL_MODE;
580         } else
581                 drvdata->ctrl &= ~ETMCR_STALL_MODE;
582
583         if (drvdata->mode & ETM_MODE_TIMESTAMP) {
584                 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
585                         dev_warn(drvdata->dev, "timestamp not supported\n");
586                         ret = -EINVAL;
587                         goto err_unlock;
588                 }
589                 drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
590         } else
591                 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
592
593         if (drvdata->mode & ETM_MODE_CTXID)
594                 drvdata->ctrl |= ETMCR_CTXID_SIZE;
595         else
596                 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
597         spin_unlock(&drvdata->spinlock);
598
599         return size;
600
601 err_unlock:
602         spin_unlock(&drvdata->spinlock);
603         return ret;
604 }
605 static DEVICE_ATTR_RW(mode);
606
607 static ssize_t trigger_event_show(struct device *dev,
608                                   struct device_attribute *attr, char *buf)
609 {
610         unsigned long val;
611         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
612
613         val = drvdata->trigger_event;
614         return sprintf(buf, "%#lx\n", val);
615 }
616
617 static ssize_t trigger_event_store(struct device *dev,
618                                    struct device_attribute *attr,
619                                    const char *buf, size_t size)
620 {
621         int ret;
622         unsigned long val;
623         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
624
625         ret = kstrtoul(buf, 16, &val);
626         if (ret)
627                 return ret;
628
629         drvdata->trigger_event = val & ETM_EVENT_MASK;
630
631         return size;
632 }
633 static DEVICE_ATTR_RW(trigger_event);
634
635 static ssize_t enable_event_show(struct device *dev,
636                                  struct device_attribute *attr, char *buf)
637 {
638         unsigned long val;
639         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
640
641         val = drvdata->enable_event;
642         return sprintf(buf, "%#lx\n", val);
643 }
644
645 static ssize_t enable_event_store(struct device *dev,
646                                   struct device_attribute *attr,
647                                   const char *buf, size_t size)
648 {
649         int ret;
650         unsigned long val;
651         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
652
653         ret = kstrtoul(buf, 16, &val);
654         if (ret)
655                 return ret;
656
657         drvdata->enable_event = val & ETM_EVENT_MASK;
658
659         return size;
660 }
661 static DEVICE_ATTR_RW(enable_event);
662
663 static ssize_t fifofull_level_show(struct device *dev,
664                                    struct device_attribute *attr, char *buf)
665 {
666         unsigned long val;
667         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
668
669         val = drvdata->fifofull_level;
670         return sprintf(buf, "%#lx\n", val);
671 }
672
673 static ssize_t fifofull_level_store(struct device *dev,
674                                     struct device_attribute *attr,
675                                     const char *buf, size_t size)
676 {
677         int ret;
678         unsigned long val;
679         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
680
681         ret = kstrtoul(buf, 16, &val);
682         if (ret)
683                 return ret;
684
685         drvdata->fifofull_level = val;
686
687         return size;
688 }
689 static DEVICE_ATTR_RW(fifofull_level);
690
691 static ssize_t addr_idx_show(struct device *dev,
692                              struct device_attribute *attr, char *buf)
693 {
694         unsigned long val;
695         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
696
697         val = drvdata->addr_idx;
698         return sprintf(buf, "%#lx\n", val);
699 }
700
701 static ssize_t addr_idx_store(struct device *dev,
702                               struct device_attribute *attr,
703                               const char *buf, size_t size)
704 {
705         int ret;
706         unsigned long val;
707         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
708
709         ret = kstrtoul(buf, 16, &val);
710         if (ret)
711                 return ret;
712
713         if (val >= drvdata->nr_addr_cmp)
714                 return -EINVAL;
715
716         /*
717          * Use spinlock to ensure index doesn't change while it gets
718          * dereferenced multiple times within a spinlock block elsewhere.
719          */
720         spin_lock(&drvdata->spinlock);
721         drvdata->addr_idx = val;
722         spin_unlock(&drvdata->spinlock);
723
724         return size;
725 }
726 static DEVICE_ATTR_RW(addr_idx);
727
728 static ssize_t addr_single_show(struct device *dev,
729                                 struct device_attribute *attr, char *buf)
730 {
731         u8 idx;
732         unsigned long val;
733         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
734
735         spin_lock(&drvdata->spinlock);
736         idx = drvdata->addr_idx;
737         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
738               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
739                 spin_unlock(&drvdata->spinlock);
740                 return -EINVAL;
741         }
742
743         val = drvdata->addr_val[idx];
744         spin_unlock(&drvdata->spinlock);
745
746         return sprintf(buf, "%#lx\n", val);
747 }
748
749 static ssize_t addr_single_store(struct device *dev,
750                                  struct device_attribute *attr,
751                                  const char *buf, size_t size)
752 {
753         u8 idx;
754         int ret;
755         unsigned long val;
756         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
757
758         ret = kstrtoul(buf, 16, &val);
759         if (ret)
760                 return ret;
761
762         spin_lock(&drvdata->spinlock);
763         idx = drvdata->addr_idx;
764         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
765               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
766                 spin_unlock(&drvdata->spinlock);
767                 return -EINVAL;
768         }
769
770         drvdata->addr_val[idx] = val;
771         drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
772         spin_unlock(&drvdata->spinlock);
773
774         return size;
775 }
776 static DEVICE_ATTR_RW(addr_single);
777
778 static ssize_t addr_range_show(struct device *dev,
779                                struct device_attribute *attr, char *buf)
780 {
781         u8 idx;
782         unsigned long val1, val2;
783         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
784
785         spin_lock(&drvdata->spinlock);
786         idx = drvdata->addr_idx;
787         if (idx % 2 != 0) {
788                 spin_unlock(&drvdata->spinlock);
789                 return -EPERM;
790         }
791         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
792                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
793               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
794                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
795                 spin_unlock(&drvdata->spinlock);
796                 return -EPERM;
797         }
798
799         val1 = drvdata->addr_val[idx];
800         val2 = drvdata->addr_val[idx + 1];
801         spin_unlock(&drvdata->spinlock);
802
803         return sprintf(buf, "%#lx %#lx\n", val1, val2);
804 }
805
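/*
 * Address range comparators work on an even/odd pair, so the current
 * index must be even and both comparators either free or already
 * configured as a range.
 */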
806 static ssize_t addr_range_store(struct device *dev,
807                               struct device_attribute *attr,
808                               const char *buf, size_t size)
809 {
810         u8 idx;
811         unsigned long val1, val2;
812         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
813
814         if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
815                 return -EINVAL;
816         /* Lower address comparator cannot have a higher address value */
817         if (val1 > val2)
818                 return -EINVAL;
819
820         spin_lock(&drvdata->spinlock);
821         idx = drvdata->addr_idx;
822         if (idx % 2 != 0) {
823                 spin_unlock(&drvdata->spinlock);
824                 return -EPERM;
825         }
826         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
827                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
828               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
829                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
830                 spin_unlock(&drvdata->spinlock);
831                 return -EPERM;
832         }
833
834         drvdata->addr_val[idx] = val1;
835         drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
836         drvdata->addr_val[idx + 1] = val2;
837         drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
838         drvdata->enable_ctrl1 |= (1 << (idx / 2));
839         spin_unlock(&drvdata->spinlock);
840
841         return size;
842 }
843 static DEVICE_ATTR_RW(addr_range);
844
845 static ssize_t addr_start_show(struct device *dev,
846                                struct device_attribute *attr, char *buf)
847 {
848         u8 idx;
849         unsigned long val;
850         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
851
852         spin_lock(&drvdata->spinlock);
853         idx = drvdata->addr_idx;
854         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
855               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
856                 spin_unlock(&drvdata->spinlock);
857                 return -EPERM;
858         }
859
860         val = drvdata->addr_val[idx];
861         spin_unlock(&drvdata->spinlock);
862
863         return sprintf(buf, "%#lx\n", val);
864 }
865
866 static ssize_t addr_start_store(struct device *dev,
867                                 struct device_attribute *attr,
868                                 const char *buf, size_t size)
869 {
870         u8 idx;
871         int ret;
872         unsigned long val;
873         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
874
875         ret = kstrtoul(buf, 16, &val);
876         if (ret)
877                 return ret;
878
879         spin_lock(&drvdata->spinlock);
880         idx = drvdata->addr_idx;
881         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
882               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
883                 spin_unlock(&drvdata->spinlock);
884                 return -EPERM;
885         }
886
887         drvdata->addr_val[idx] = val;
888         drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
889         drvdata->startstop_ctrl |= (1 << idx);
890         drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
891         spin_unlock(&drvdata->spinlock);
892
893         return size;
894 }
895 static DEVICE_ATTR_RW(addr_start);
896
897 static ssize_t addr_stop_show(struct device *dev,
898                               struct device_attribute *attr, char *buf)
899 {
900         u8 idx;
901         unsigned long val;
902         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
903
904         spin_lock(&drvdata->spinlock);
905         idx = drvdata->addr_idx;
906         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
907               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
908                 spin_unlock(&drvdata->spinlock);
909                 return -EPERM;
910         }
911
912         val = drvdata->addr_val[idx];
913         spin_unlock(&drvdata->spinlock);
914
915         return sprintf(buf, "%#lx\n", val);
916 }
917
918 static ssize_t addr_stop_store(struct device *dev,
919                                struct device_attribute *attr,
920                                const char *buf, size_t size)
921 {
922         u8 idx;
923         int ret;
924         unsigned long val;
925         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
926
927         ret = kstrtoul(buf, 16, &val);
928         if (ret)
929                 return ret;
930
931         spin_lock(&drvdata->spinlock);
932         idx = drvdata->addr_idx;
933         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
934               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
935                 spin_unlock(&drvdata->spinlock);
936                 return -EPERM;
937         }
938
939         drvdata->addr_val[idx] = val;
940         drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
941         drvdata->startstop_ctrl |= (1 << (idx + 16));
942         drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
943         spin_unlock(&drvdata->spinlock);
944
945         return size;
946 }
947 static DEVICE_ATTR_RW(addr_stop);
948
949 static ssize_t addr_acctype_show(struct device *dev,
950                                  struct device_attribute *attr, char *buf)
951 {
952         unsigned long val;
953         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
954
955         spin_lock(&drvdata->spinlock);
956         val = drvdata->addr_acctype[drvdata->addr_idx];
957         spin_unlock(&drvdata->spinlock);
958
959         return sprintf(buf, "%#lx\n", val);
960 }
961
962 static ssize_t addr_acctype_store(struct device *dev,
963                                   struct device_attribute *attr,
964                                   const char *buf, size_t size)
965 {
966         int ret;
967         unsigned long val;
968         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
969
970         ret = kstrtoul(buf, 16, &val);
971         if (ret)
972                 return ret;
973
974         spin_lock(&drvdata->spinlock);
975         drvdata->addr_acctype[drvdata->addr_idx] = val;
976         spin_unlock(&drvdata->spinlock);
977
978         return size;
979 }
980 static DEVICE_ATTR_RW(addr_acctype);
981
982 static ssize_t cntr_idx_show(struct device *dev,
983                              struct device_attribute *attr, char *buf)
984 {
985         unsigned long val;
986         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
987
988         val = drvdata->cntr_idx;
989         return sprintf(buf, "%#lx\n", val);
990 }
991
992 static ssize_t cntr_idx_store(struct device *dev,
993                               struct device_attribute *attr,
994                               const char *buf, size_t size)
995 {
996         int ret;
997         unsigned long val;
998         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
999
1000         ret = kstrtoul(buf, 16, &val);
1001         if (ret)
1002                 return ret;
1003
1004         if (val >= drvdata->nr_cntr)
1005                 return -EINVAL;
1006         /*
1007          * Use spinlock to ensure index doesn't change while it gets
1008          * dereferenced multiple times within a spinlock block elsewhere.
1009          */
1010         spin_lock(&drvdata->spinlock);
1011         drvdata->cntr_idx = val;
1012         spin_unlock(&drvdata->spinlock);
1013
1014         return size;
1015 }
1016 static DEVICE_ATTR_RW(cntr_idx);
1017
1018 static ssize_t cntr_rld_val_show(struct device *dev,
1019                                  struct device_attribute *attr, char *buf)
1020 {
1021         unsigned long val;
1022         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1023
1024         spin_lock(&drvdata->spinlock);
1025         val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1026         spin_unlock(&drvdata->spinlock);
1027
1028         return sprintf(buf, "%#lx\n", val);
1029 }
1030
1031 static ssize_t cntr_rld_val_store(struct device *dev,
1032                                   struct device_attribute *attr,
1033                                   const char *buf, size_t size)
1034 {
1035         int ret;
1036         unsigned long val;
1037         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1038
1039         ret = kstrtoul(buf, 16, &val);
1040         if (ret)
1041                 return ret;
1042
1043         spin_lock(&drvdata->spinlock);
1044         drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1045         spin_unlock(&drvdata->spinlock);
1046
1047         return size;
1048 }
1049 static DEVICE_ATTR_RW(cntr_rld_val);
1050
1051 static ssize_t cntr_event_show(struct device *dev,
1052                                struct device_attribute *attr, char *buf)
1053 {
1054         unsigned long val;
1055         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1056
1057         spin_lock(&drvdata->spinlock);
1058         val = drvdata->cntr_event[drvdata->cntr_idx];
1059         spin_unlock(&drvdata->spinlock);
1060
1061         return sprintf(buf, "%#lx\n", val);
1062 }
1063
1064 static ssize_t cntr_event_store(struct device *dev,
1065                                 struct device_attribute *attr,
1066                                 const char *buf, size_t size)
1067 {
1068         int ret;
1069         unsigned long val;
1070         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1071
1072         ret = kstrtoul(buf, 16, &val);
1073         if (ret)
1074                 return ret;
1075
1076         spin_lock(&drvdata->spinlock);
1077         drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1078         spin_unlock(&drvdata->spinlock);
1079
1080         return size;
1081 }
1082 static DEVICE_ATTR_RW(cntr_event);
1083
1084 static ssize_t cntr_rld_event_show(struct device *dev,
1085                                    struct device_attribute *attr, char *buf)
1086 {
1087         unsigned long val;
1088         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1089
1090         spin_lock(&drvdata->spinlock);
1091         val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1092         spin_unlock(&drvdata->spinlock);
1093
1094         return sprintf(buf, "%#lx\n", val);
1095 }
1096
1097 static ssize_t cntr_rld_event_store(struct device *dev,
1098                                     struct device_attribute *attr,
1099                                     const char *buf, size_t size)
1100 {
1101         int ret;
1102         unsigned long val;
1103         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1104
1105         ret = kstrtoul(buf, 16, &val);
1106         if (ret)
1107                 return ret;
1108
1109         spin_lock(&drvdata->spinlock);
1110         drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1111         spin_unlock(&drvdata->spinlock);
1112
1113         return size;
1114 }
1115 static DEVICE_ATTR_RW(cntr_rld_event);
1116
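/*
 * Print the saved counter values when the ETM is disabled, otherwise the
 * live values read back from the hardware.
 */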
1117 static ssize_t cntr_val_show(struct device *dev,
1118                              struct device_attribute *attr, char *buf)
1119 {
1120         int i, ret = 0;
1121         u32 val;
1122         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1123
1124         if (!drvdata->enable) {
1125                 spin_lock(&drvdata->spinlock);
1126                 for (i = 0; i < drvdata->nr_cntr; i++)
1127                         ret += sprintf(buf + ret, "counter %d: %x\n",
1128                                        i, drvdata->cntr_val[i]);
1129                 spin_unlock(&drvdata->spinlock);
1130                 return ret;
1131         }
1132
1133         for (i = 0; i < drvdata->nr_cntr; i++) {
1134                 val = etm_readl(drvdata, ETMCNTVRn(i));
1135                 ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
1136         }
1137
1138         return ret;
1139 }
1140
1141 static ssize_t cntr_val_store(struct device *dev,
1142                               struct device_attribute *attr,
1143                               const char *buf, size_t size)
1144 {
1145         int ret;
1146         unsigned long val;
1147         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1148
1149         ret = kstrtoul(buf, 16, &val);
1150         if (ret)
1151                 return ret;
1152
1153         spin_lock(&drvdata->spinlock);
1154         drvdata->cntr_val[drvdata->cntr_idx] = val;
1155         spin_unlock(&drvdata->spinlock);
1156
1157         return size;
1158 }
1159 static DEVICE_ATTR_RW(cntr_val);
1160
1161 static ssize_t seq_12_event_show(struct device *dev,
1162                                  struct device_attribute *attr, char *buf)
1163 {
1164         unsigned long val;
1165         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1166
1167         val = drvdata->seq_12_event;
1168         return sprintf(buf, "%#lx\n", val);
1169 }
1170
1171 static ssize_t seq_12_event_store(struct device *dev,
1172                                   struct device_attribute *attr,
1173                                   const char *buf, size_t size)
1174 {
1175         int ret;
1176         unsigned long val;
1177         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1178
1179         ret = kstrtoul(buf, 16, &val);
1180         if (ret)
1181                 return ret;
1182
1183         drvdata->seq_12_event = val & ETM_EVENT_MASK;
1184         return size;
1185 }
1186 static DEVICE_ATTR_RW(seq_12_event);
1187
1188 static ssize_t seq_21_event_show(struct device *dev,
1189                                  struct device_attribute *attr, char *buf)
1190 {
1191         unsigned long val;
1192         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1193
1194         val = drvdata->seq_21_event;
1195         return sprintf(buf, "%#lx\n", val);
1196 }
1197
1198 static ssize_t seq_21_event_store(struct device *dev,
1199                                   struct device_attribute *attr,
1200                                   const char *buf, size_t size)
1201 {
1202         int ret;
1203         unsigned long val;
1204         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1205
1206         ret = kstrtoul(buf, 16, &val);
1207         if (ret)
1208                 return ret;
1209
1210         drvdata->seq_21_event = val & ETM_EVENT_MASK;
1211         return size;
1212 }
1213 static DEVICE_ATTR_RW(seq_21_event);
1214
1215 static ssize_t seq_23_event_show(struct device *dev,
1216                                  struct device_attribute *attr, char *buf)
1217 {
1218         unsigned long val;
1219         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1220
1221         val = drvdata->seq_23_event;
1222         return sprintf(buf, "%#lx\n", val);
1223 }
1224
1225 static ssize_t seq_23_event_store(struct device *dev,
1226                                   struct device_attribute *attr,
1227                                   const char *buf, size_t size)
1228 {
1229         int ret;
1230         unsigned long val;
1231         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1232
1233         ret = kstrtoul(buf, 16, &val);
1234         if (ret)
1235                 return ret;
1236
1237         drvdata->seq_23_event = val & ETM_EVENT_MASK;
1238         return size;
1239 }
1240 static DEVICE_ATTR_RW(seq_23_event);
1241
1242 static ssize_t seq_31_event_show(struct device *dev,
1243                                  struct device_attribute *attr, char *buf)
1244 {
1245         unsigned long val;
1246         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1247
1248         val = drvdata->seq_31_event;
1249         return sprintf(buf, "%#lx\n", val);
1250 }
1251
1252 static ssize_t seq_31_event_store(struct device *dev,
1253                                   struct device_attribute *attr,
1254                                   const char *buf, size_t size)
1255 {
1256         int ret;
1257         unsigned long val;
1258         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1259
1260         ret = kstrtoul(buf, 16, &val);
1261         if (ret)
1262                 return ret;
1263
1264         drvdata->seq_31_event = val & ETM_EVENT_MASK;
1265         return size;
1266 }
1267 static DEVICE_ATTR_RW(seq_31_event);
1268
1269 static ssize_t seq_32_event_show(struct device *dev,
1270                                  struct device_attribute *attr, char *buf)
1271 {
1272         unsigned long val;
1273         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1274
1275         val = drvdata->seq_32_event;
1276         return sprintf(buf, "%#lx\n", val);
1277 }
1278
1279 static ssize_t seq_32_event_store(struct device *dev,
1280                                   struct device_attribute *attr,
1281                                   const char *buf, size_t size)
1282 {
1283         int ret;
1284         unsigned long val;
1285         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1286
1287         ret = kstrtoul(buf, 16, &val);
1288         if (ret)
1289                 return ret;
1290
1291         drvdata->seq_32_event = val & ETM_EVENT_MASK;
1292         return size;
1293 }
1294 static DEVICE_ATTR_RW(seq_32_event);
1295
1296 static ssize_t seq_13_event_show(struct device *dev,
1297                                  struct device_attribute *attr, char *buf)
1298 {
1299         unsigned long val;
1300         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1301
1302         val = drvdata->seq_13_event;
1303         return sprintf(buf, "%#lx\n", val);
1304 }
1305
1306 static ssize_t seq_13_event_store(struct device *dev,
1307                                   struct device_attribute *attr,
1308                                   const char *buf, size_t size)
1309 {
1310         int ret;
1311         unsigned long val;
1312         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1313
1314         ret = kstrtoul(buf, 16, &val);
1315         if (ret)
1316                 return ret;
1317
1318         drvdata->seq_13_event = val & ETM_EVENT_MASK;
1319         return size;
1320 }
1321 static DEVICE_ATTR_RW(seq_13_event);
1322
1323 static ssize_t seq_curr_state_show(struct device *dev,
1324                                    struct device_attribute *attr, char *buf)
1325 {
1326         int ret;
1327         unsigned long val, flags;
1328         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1329
1330         if (!drvdata->enable) {
1331                 val = drvdata->seq_curr_state;
1332                 goto out;
1333         }
1334
1335         ret = clk_prepare_enable(drvdata->clk);
1336         if (ret)
1337                 return ret;
1338
1339         spin_lock_irqsave(&drvdata->spinlock, flags);
1340
1341         CS_UNLOCK(drvdata->base);
1342         val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
1343         CS_LOCK(drvdata->base);
1344
1345         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1346         clk_disable_unprepare(drvdata->clk);
1347 out:
1348         return sprintf(buf, "%#lx\n", val);
1349 }
1350
1351 static ssize_t seq_curr_state_store(struct device *dev,
1352                                     struct device_attribute *attr,
1353                                     const char *buf, size_t size)
1354 {
1355         int ret;
1356         unsigned long val;
1357         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1358
1359         ret = kstrtoul(buf, 16, &val);
1360         if (ret)
1361                 return ret;
1362
1363         if (val > ETM_SEQ_STATE_MAX_VAL)
1364                 return -EINVAL;
1365
1366         drvdata->seq_curr_state = val;
1367
1368         return size;
1369 }
1370 static DEVICE_ATTR_RW(seq_curr_state);
1371
1372 static ssize_t ctxid_idx_show(struct device *dev,
1373                               struct device_attribute *attr, char *buf)
1374 {
1375         unsigned long val;
1376         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1377
1378         val = drvdata->ctxid_idx;
1379         return sprintf(buf, "%#lx\n", val);
1380 }
1381
1382 static ssize_t ctxid_idx_store(struct device *dev,
1383                                 struct device_attribute *attr,
1384                                 const char *buf, size_t size)
1385 {
1386         int ret;
1387         unsigned long val;
1388         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1389
1390         ret = kstrtoul(buf, 16, &val);
1391         if (ret)
1392                 return ret;
1393
1394         if (val >= drvdata->nr_ctxid_cmp)
1395                 return -EINVAL;
1396
1397         /*
1398          * Use spinlock to ensure index doesn't change while it gets
1399          * dereferenced multiple times within a spinlock block elsewhere.
1400          */
1401         spin_lock(&drvdata->spinlock);
1402         drvdata->ctxid_idx = val;
1403         spin_unlock(&drvdata->spinlock);
1404
1405         return size;
1406 }
1407 static DEVICE_ATTR_RW(ctxid_idx);
1408
1409 static ssize_t ctxid_val_show(struct device *dev,
1410                               struct device_attribute *attr, char *buf)
1411 {
1412         unsigned long val;
1413         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1414
1415         spin_lock(&drvdata->spinlock);
1416         val = drvdata->ctxid_val[drvdata->ctxid_idx];
1417         spin_unlock(&drvdata->spinlock);
1418
1419         return sprintf(buf, "%#lx\n", val);
1420 }
1421
1422 static ssize_t ctxid_val_store(struct device *dev,
1423                                struct device_attribute *attr,
1424                                const char *buf, size_t size)
1425 {
1426         int ret;
1427         unsigned long val;
1428         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1429
1430         ret = kstrtoul(buf, 16, &val);
1431         if (ret)
1432                 return ret;
1433
1434         spin_lock(&drvdata->spinlock);
1435         drvdata->ctxid_val[drvdata->ctxid_idx] = val;
1436         spin_unlock(&drvdata->spinlock);
1437
1438         return size;
1439 }
1440 static DEVICE_ATTR_RW(ctxid_val);
1441
1442 static ssize_t ctxid_mask_show(struct device *dev,
1443                                struct device_attribute *attr, char *buf)
1444 {
1445         unsigned long val;
1446         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1447
1448         val = drvdata->ctxid_mask;
1449         return sprintf(buf, "%#lx\n", val);
1450 }
1451
1452 static ssize_t ctxid_mask_store(struct device *dev,
1453                                 struct device_attribute *attr,
1454                                 const char *buf, size_t size)
1455 {
1456         int ret;
1457         unsigned long val;
1458         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1459
1460         ret = kstrtoul(buf, 16, &val);
1461         if (ret)
1462                 return ret;
1463
1464         drvdata->ctxid_mask = val;
1465         return size;
1466 }
1467 static DEVICE_ATTR_RW(ctxid_mask);
1468
1469 static ssize_t sync_freq_show(struct device *dev,
1470                               struct device_attribute *attr, char *buf)
1471 {
1472         unsigned long val;
1473         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1474
1475         val = drvdata->sync_freq;
1476         return sprintf(buf, "%#lx\n", val);
1477 }
1478
1479 static ssize_t sync_freq_store(struct device *dev,
1480                                struct device_attribute *attr,
1481                                const char *buf, size_t size)
1482 {
1483         int ret;
1484         unsigned long val;
1485         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1486
1487         ret = kstrtoul(buf, 16, &val);
1488         if (ret)
1489                 return ret;
1490
1491         drvdata->sync_freq = val & ETM_SYNC_MASK;
1492         return size;
1493 }
1494 static DEVICE_ATTR_RW(sync_freq);
1495
1496 static ssize_t timestamp_event_show(struct device *dev,
1497                                     struct device_attribute *attr, char *buf)
1498 {
1499         unsigned long val;
1500         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1501
1502         val = drvdata->timestamp_event;
1503         return sprintf(buf, "%#lx\n", val);
1504 }
1505
1506 static ssize_t timestamp_event_store(struct device *dev,
1507                                      struct device_attribute *attr,
1508                                      const char *buf, size_t size)
1509 {
1510         int ret;
1511         unsigned long val;
1512         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1513
1514         ret = kstrtoul(buf, 16, &val);
1515         if (ret)
1516                 return ret;
1517
1518         drvdata->timestamp_event = val & ETM_EVENT_MASK;
1519         return size;
1520 }
1521 static DEVICE_ATTR_RW(timestamp_event);
1522
1523 static ssize_t status_show(struct device *dev,
1524                            struct device_attribute *attr, char *buf)
1525 {
1526         int ret;
1527         unsigned long flags;
1528         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1529
1530         ret = clk_prepare_enable(drvdata->clk);
1531         if (ret)
1532                 return ret;
1533
1534         spin_lock_irqsave(&drvdata->spinlock, flags);
1535
1536         CS_UNLOCK(drvdata->base);
1537         ret = sprintf(buf,
1538                       "ETMCCR: 0x%08x\n"
1539                       "ETMCCER: 0x%08x\n"
1540                       "ETMSCR: 0x%08x\n"
1541                       "ETMIDR: 0x%08x\n"
1542                       "ETMCR: 0x%08x\n"
1543                       "ETMTRACEIDR: 0x%08x\n"
1544                       "Enable event: 0x%08x\n"
1545                       "Enable start/stop: 0x%08x\n"
1546                       "Enable control: CR1 0x%08x CR2 0x%08x\n"
1547                       "CPU affinity: %d\n",
1548                       drvdata->etmccr, drvdata->etmccer,
1549                       etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
1550                       etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
1551                       etm_readl(drvdata, ETMTEEVR),
1552                       etm_readl(drvdata, ETMTSSCR),
1553                       etm_readl(drvdata, ETMTECR1),
1554                       etm_readl(drvdata, ETMTECR2),
1555                       drvdata->cpu);
1556         CS_LOCK(drvdata->base);
1557
1558         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1559         clk_disable_unprepare(drvdata->clk);
1560
1561         return ret;
1562 }
1563 static DEVICE_ATTR_RO(status);
1564
1565 static ssize_t traceid_show(struct device *dev,
1566                             struct device_attribute *attr, char *buf)
1567 {
1568         int ret;
1569         unsigned long val, flags;
1570         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1571
1572         if (!drvdata->enable) {
1573                 val = drvdata->traceid;
1574                 goto out;
1575         }
1576
1577         ret = clk_prepare_enable(drvdata->clk);
1578         if (ret)
1579                 return ret;
1580
1581         spin_lock_irqsave(&drvdata->spinlock, flags);
1582         CS_UNLOCK(drvdata->base);
1583
1584         val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
1585
1586         CS_LOCK(drvdata->base);
1587         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1588         clk_disable_unprepare(drvdata->clk);
1589 out:
1590         return sprintf(buf, "%#lx\n", val);
1591 }
1592
1593 static ssize_t traceid_store(struct device *dev,
1594                              struct device_attribute *attr,
1595                              const char *buf, size_t size)
1596 {
1597         int ret;
1598         unsigned long val;
1599         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1600
1601         ret = kstrtoul(buf, 16, &val);
1602         if (ret)
1603                 return ret;
1604
1605         drvdata->traceid = val & ETM_TRACEID_MASK;
1606         return size;
1607 }
1608 static DEVICE_ATTR_RW(traceid);
1609
1610 static struct attribute *coresight_etm_attrs[] = {
1611         &dev_attr_nr_addr_cmp.attr,
1612         &dev_attr_nr_cntr.attr,
1613         &dev_attr_nr_ctxid_cmp.attr,
1614         &dev_attr_etmsr.attr,
1615         &dev_attr_reset.attr,
1616         &dev_attr_mode.attr,
1617         &dev_attr_trigger_event.attr,
1618         &dev_attr_enable_event.attr,
1619         &dev_attr_fifofull_level.attr,
1620         &dev_attr_addr_idx.attr,
1621         &dev_attr_addr_single.attr,
1622         &dev_attr_addr_range.attr,
1623         &dev_attr_addr_start.attr,
1624         &dev_attr_addr_stop.attr,
1625         &dev_attr_addr_acctype.attr,
1626         &dev_attr_cntr_idx.attr,
1627         &dev_attr_cntr_rld_val.attr,
1628         &dev_attr_cntr_event.attr,
1629         &dev_attr_cntr_rld_event.attr,
1630         &dev_attr_cntr_val.attr,
1631         &dev_attr_seq_12_event.attr,
1632         &dev_attr_seq_21_event.attr,
1633         &dev_attr_seq_23_event.attr,
1634         &dev_attr_seq_31_event.attr,
1635         &dev_attr_seq_32_event.attr,
1636         &dev_attr_seq_13_event.attr,
1637         &dev_attr_seq_curr_state.attr,
1638         &dev_attr_ctxid_idx.attr,
1639         &dev_attr_ctxid_val.attr,
1640         &dev_attr_ctxid_mask.attr,
1641         &dev_attr_sync_freq.attr,
1642         &dev_attr_timestamp_event.attr,
1643         &dev_attr_status.attr,
1644         &dev_attr_traceid.attr,
1645         NULL,
1646 };
1647 ATTRIBUTE_GROUPS(coresight_etm);
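
/*
 * The attributes above appear under the component's sysfs node, for
 * example (path and device name are platform dependent):
 *
 *   # cat /sys/bus/coresight/devices/<etm-name>/status
 *   # echo 0x12 > /sys/bus/coresight/devices/<etm-name>/traceid
 *
 * They only configure the ETM/PTM; an actual trace session is started
 * through the coresight core (coresight_enable()), typically by enabling
 * a sink and then this source from sysfs.
 */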
1648
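/*
 * CPU hotplug callback.  CPU_STARTING and CPU_DYING run on the affected
 * CPU with interrupts disabled, so the trace unit can be programmed
 * directly; CPU_ONLINE runs later in a context where coresight_enable()
 * may sleep and is used to apply boot_enable to an ETM that has not been
 * enabled yet.
 */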
1649 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1650                             void *hcpu)
1651 {
1652         unsigned int cpu = (unsigned long)hcpu;
1653
1654         if (!etmdrvdata[cpu])
1655                 goto out;
1656
1657         switch (action & (~CPU_TASKS_FROZEN)) {
1658         case CPU_STARTING:
1659                 spin_lock(&etmdrvdata[cpu]->spinlock);
1660                 if (!etmdrvdata[cpu]->os_unlock) {
1661                         etm_os_unlock(etmdrvdata[cpu]);
1662                         etmdrvdata[cpu]->os_unlock = true;
1663                 }
1664
1665                 if (etmdrvdata[cpu]->enable)
1666                         etm_enable_hw(etmdrvdata[cpu]);
1667                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1668                 break;
1669
1670         case CPU_ONLINE:
1671                 if (etmdrvdata[cpu]->boot_enable &&
1672                     !etmdrvdata[cpu]->sticky_enable)
1673                         coresight_enable(etmdrvdata[cpu]->csdev);
1674                 break;
1675
1676         case CPU_DYING:
1677                 spin_lock(&etmdrvdata[cpu]->spinlock);
1678                 if (etmdrvdata[cpu]->enable)
1679                         etm_disable_hw(etmdrvdata[cpu]);
1680                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1681                 break;
1682         }
1683 out:
1684         return NOTIFY_OK;
1685 }
1686
1687 static struct notifier_block etm_cpu_notifier = {
1688         .notifier_call = etm_cpu_callback,
1689 };
1690
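/*
 * Only ETM v3.3/v3.5 and PFT (PTM) v1.0/v1.1 implement the programming
 * model this driver handles; anything else is rejected at probe time.
 */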
1691 static bool etm_arch_supported(u8 arch)
1692 {
1693         switch (arch) {
1694         case ETM_ARCH_V3_3:
1695                 break;
1696         case ETM_ARCH_V3_5:
1697                 break;
1698         case PFT_ARCH_V1_0:
1699                 break;
1700         case PFT_ARCH_V1_1:
1701                 break;
1702         default:
1703                 return false;
1704         }
1705         return true;
1706 }
1707
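/*
 * Executed via smp_call_function_single() on the CPU this ETM/PTM is
 * affined to, so that CP14 accesses (when "arm,cp14" is set) target the
 * right core.  Caches the ID and configuration register contents the rest
 * of the driver relies on.
 */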
1708 static void etm_init_arch_data(void *info)
1709 {
1710         u32 etmidr;
1711         u32 etmccr;
1712         struct etm_drvdata *drvdata = info;
1713
1714         CS_UNLOCK(drvdata->base);
1715
1716         /* First dummy read */
1717         (void)etm_readl(drvdata, ETMPDSR);
1718         /* Provide power to ETM: ETMPDCR[3] == 1 */
1719         etm_set_pwrup(drvdata);
1720         /*
1721          * Clear the power down bit: while it is set, writes to
1722          * certain registers might be ignored.
1723          */
1724         etm_clr_pwrdwn(drvdata);
1725         /*
1726          * Set prog bit. It will be set from reset but this is included to
1727          * ensure it is set
1728          */
1729         etm_set_prog(drvdata);
1730
1731         /* Find all capabilities */
1732         etmidr = etm_readl(drvdata, ETMIDR);
1733         drvdata->arch = BMVAL(etmidr, 4, 11);
1734         drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
1735
1736         drvdata->etmccer = etm_readl(drvdata, ETMCCER);
1737         etmccr = etm_readl(drvdata, ETMCCR);
1738         drvdata->etmccr = etmccr;
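        /*
         * ETMCCR layout (ETMv3): [3:0] address comparator pairs,
         * [15:13] counters, [19:17] external inputs, [22:20] external
         * outputs, [25:24] context ID comparators.
         */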
1739         drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
1740         drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
1741         drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
1742         drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
1743         drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
1744
1745         etm_set_pwrdwn(drvdata);
1746         etm_clr_pwrup(drvdata);
1747         CS_LOCK(drvdata->base);
1748 }
1749
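/*
 * Pick sane defaults: cycle-accurate tracing with timestamping enabled
 * and, when at least one address range comparator pair is implemented,
 * tracing restricted to the kernel text section (_stext to _etext).
 */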
1750 static void etm_init_default_data(struct etm_drvdata *drvdata)
1751 {
1752         /*
1753          * A trace ID of value 0 is invalid, so let's start at some
1754          * random value that fits in 7 bits and will be just as good.
1755          */
1756         static int etm3x_traceid = 0x10;
1757
1758         u32 flags = (1 << 0 | /* instruction execute */
1759                      3 << 3 | /* ARM instruction */
1760                      0 << 5 | /* No data value comparison */
1761                      0 << 7 | /* No exact match */
1762                      0 << 8 | /* Ignore context ID */
1763                      0 << 10); /* Security ignored */
1764
1765         /*
1766          * Initial configuration only - guarantees sources handled by
1767          * this driver have a unique ID at startup time but not between
1768          * all other types of sources.  For that we lean on the core
1769          * framework.
1770          */
1771         drvdata->traceid = etm3x_traceid++;
1772         drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
1773         drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
1774         if (drvdata->nr_addr_cmp >= 2) {
1775                 drvdata->addr_val[0] = (u32) _stext;
1776                 drvdata->addr_val[1] = (u32) _etext;
1777                 drvdata->addr_acctype[0] = flags;
1778                 drvdata->addr_acctype[1] = flags;
1779                 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
1780                 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
1781         }
1782
1783         etm_set_default(drvdata);
1784 }
1785
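/*
 * Probe flow: map the registers, gather the implementation details on the
 * CPU this component is affined to, install defaults and register with the
 * coresight framework.  The optional "arm,cp14" property selects CP14
 * accesses instead of memory mapped ones.  A matching device tree node
 * typically looks like the sketch below (address, clock and phandles are
 * illustrative, see the CoreSight DT bindings for the authoritative
 * format):
 *
 *      etm@2201c000 {
 *              compatible = "arm,coresight-etm3x", "arm,primecell";
 *              reg = <0x2201c000 0x1000>;
 *              clocks = <&oscclk6a>;
 *              clock-names = "apb_pclk";
 *              cpu = <&cpu0>;
 *              port {
 *                      etm0_out_port: endpoint {
 *                              remote-endpoint = <&funnel_in_port0>;
 *                      };
 *              };
 *      };
 */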
1786 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1787 {
1788         int ret;
1789         void __iomem *base;
1790         struct device *dev = &adev->dev;
1791         struct coresight_platform_data *pdata = NULL;
1792         struct etm_drvdata *drvdata;
1793         struct resource *res = &adev->res;
1794         struct coresight_desc *desc;
1795         struct device_node *np = adev->dev.of_node;
1796
1797         desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
1798         if (!desc)
1799                 return -ENOMEM;
1800
1801         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1802         if (!drvdata)
1803                 return -ENOMEM;
1804
1805         if (np) {
1806                 pdata = of_get_coresight_platform_data(dev, np);
1807                 if (IS_ERR(pdata))
1808                         return PTR_ERR(pdata);
1809
1810                 adev->dev.platform_data = pdata;
1811                 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
1812         }
1813
1814         drvdata->dev = &adev->dev;
1815         dev_set_drvdata(dev, drvdata);
1816
1817         /* Validity for the resource is already checked by the AMBA core */
1818         base = devm_ioremap_resource(dev, res);
1819         if (IS_ERR(base))
1820                 return PTR_ERR(base);
1821
1822         drvdata->base = base;
1823
1824         spin_lock_init(&drvdata->spinlock);
1825
1826         drvdata->clk = adev->pclk;
1827         ret = clk_prepare_enable(drvdata->clk);
1828         if (ret)
1829                 return ret;
1830
1831         drvdata->cpu = pdata ? pdata->cpu : 0;
1832
1833         get_online_cpus();
1834         etmdrvdata[drvdata->cpu] = drvdata;
1835
1836         if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
1837                 drvdata->os_unlock = true;
1838
1839         if (smp_call_function_single(drvdata->cpu,
1840                                      etm_init_arch_data, drvdata, 1))
1841                 dev_err(dev, "ETM arch init failed\n");
1842
1843         if (!etm_count++)
1844                 register_hotcpu_notifier(&etm_cpu_notifier);
1845
1846         put_online_cpus();
1847
1848         if (!etm_arch_supported(drvdata->arch)) {
1849                 ret = -EINVAL;
1850                 goto err_arch_supported;
1851         }
1852         etm_init_default_data(drvdata);
1853
1854         clk_disable_unprepare(drvdata->clk);
1855
1856         desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1857         desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1858         desc->ops = &etm_cs_ops;
1859         desc->pdata = pdata;
1860         desc->dev = dev;
1861         desc->groups = coresight_etm_groups;
1862         drvdata->csdev = coresight_register(desc);
1863         if (IS_ERR(drvdata->csdev)) {
1864                 ret = PTR_ERR(drvdata->csdev);
1865                 goto err_coresight_register;
1866         }
1867
1868         dev_info(dev, "ETM initialized\n");
1869
1870         if (boot_enable) {
1871                 coresight_enable(drvdata->csdev);
1872                 drvdata->boot_enable = true;
1873         }
1874
1875         return 0;
1876
1877 err_arch_supported:
1878         clk_disable_unprepare(drvdata->clk);
/* the clock is already disabled by the time coresight_register() can fail */
err_coresight_register:
1879         if (--etm_count == 0)
1880                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1881         return ret;
1882 }
1883
1884 static int etm_remove(struct amba_device *adev)
1885 {
1886         struct etm_drvdata *drvdata = amba_get_drvdata(adev);
1887
1888         coresight_unregister(drvdata->csdev);
1889         if (--etm_count == 0)
1890                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1891
1892         return 0;
1893 }
1894
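/*
 * AMBA/PrimeCell peripheral IDs of the supported trace units.  The
 * 0x0003ffff mask ignores the revision and configuration fields, so any
 * revision of these ARM parts binds to this driver.
 */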
1895 static struct amba_id etm_ids[] = {
1896         {       /* ETM 3.3 */
1897                 .id     = 0x0003b921,
1898                 .mask   = 0x0003ffff,
1899         },
1900         {       /* ETM 3.5 */
1901                 .id     = 0x0003b956,
1902                 .mask   = 0x0003ffff,
1903         },
1904         {       /* PTM 1.0 */
1905                 .id     = 0x0003b950,
1906                 .mask   = 0x0003ffff,
1907         },
1908         {       /* PTM 1.1 */
1909                 .id     = 0x0003b95f,
1910                 .mask   = 0x0003ffff,
1911         },
1912         { 0, 0},
1913 };
1914
1915 static struct amba_driver etm_driver = {
1916         .drv = {
1917                 .name   = "coresight-etm3x",
1918                 .owner  = THIS_MODULE,
1919         },
1920         .probe          = etm_probe,
1921         .remove         = etm_remove,
1922         .id_table       = etm_ids,
1923 };
1924
1925 static int __init etm_init(void)
1926 {
1927         return amba_driver_register(&etm_driver);
1928 }
1929 module_init(etm_init);
1930
1931 static void __exit etm_exit(void)
1932 {
1933         amba_driver_unregister(&etm_driver);
1934 }
1935 module_exit(etm_exit);
1936
1937 MODULE_LICENSE("GPL v2");
1938 MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");