coresight: etm3x: moving sysFS entries to dedicated file
[firefly-linux-kernel-4.4.55.git] drivers/hwtracing/coresight/coresight-etm3x.c
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <asm/sections.h>

#include "coresight-etm.h"

static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETM/PTM devices currently registered */
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];

/*
 * Memory mapped writes to clear the OS lock are not supported on some
 * processors, and on such processors the OS lock must be unlocked before
 * any memory mapped access; otherwise memory mapped reads/writes will be
 * invalid.
 */
static void etm_os_unlock(void *info)
{
	struct etm_drvdata *drvdata = (struct etm_drvdata *)info;

	/* Writing any value to ETMOSLAR unlocks the trace registers */
	etm_writel(drvdata, 0x0, ETMOSLAR);
	isb();
}

static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}

static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}

/**
 * coresight_timeout_etm - loop until a bit has changed to a specific state.
 * @drvdata: etm's private data structure.
 * @offset: offset of the register to poll.
 * @position: the position of the bit of interest.
 * @value: the value the bit should have.
 *
 * Basically the same as @coresight_timeout except for the register access
 * method, where we have to account for CP14 configurations.
 *
 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
 */
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
				  int position, int value)
{
	int i;
	u32 val;

	for (i = TIMEOUT_US; i > 0; i--) {
		val = etm_readl(drvdata, offset);
		/* Waiting on the bit to go from 0 to 1 */
		if (value) {
			if (val & BIT(position))
				return 0;
		/* Waiting on the bit to go from 1 to 0 */
		} else {
			if (!(val & BIT(position)))
				return 0;
		}

		/*
		 * Delay is arbitrary; the specification doesn't say how long
		 * we are expected to wait.  Extra check required to make sure
		 * we don't wait needlessly on the last iteration.
		 */
		if (i - 1)
			udelay(1);
	}

	return -EAGAIN;
}

static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
		dev_err(drvdata->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}

static void etm_clr_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
		dev_err(drvdata->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}

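/* Reset the driver's copy of the ETM configuration to default values */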
void etm_set_default(struct etm_drvdata *drvdata)
{
	int i;

	drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->enable_event = ETM_HARD_WIRE_RES_A;

	drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;

	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntr_rld_val[i] = 0x0;
		drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
		drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
		drvdata->cntr_val[i] = 0x0;
	}

	drvdata->seq_curr_state = 0x0;
	drvdata->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask = 0x0;
}

static void etm_enable_hw(void *info)
{
	int i;
	u32 etmcr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	etm_set_prog(drvdata);

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
	etmcr |= drvdata->port_size;
	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
	}
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, drvdata->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
	}
	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	/* Ensures trace output is enabled from this ETM */
	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);

	etm_clr_prog(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}

static int etm_cpu_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return drvdata->cpu;
}

int etm_get_trace_id(struct etm_drvdata *drvdata)
{
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata)
		goto out;

	if (!drvdata->enable)
		return drvdata->traceid;

	pm_runtime_get_sync(drvdata->dev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

out:
	return trace_id;
}

static int etm_trace_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return etm_get_trace_id(drvdata);
}

static int etm_enable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	spin_lock(&drvdata->spinlock);

	/*
	 * Configure the ETM only if the CPU is online.  If it isn't online,
	 * hw configuration will take place when 'CPU_STARTING' is received
	 * in @etm_cpu_callback.
	 */
	if (cpu_online(drvdata->cpu)) {
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw, drvdata, 1);
		if (ret)
			goto err;
	}

	drvdata->enable = true;
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;
err:
	spin_unlock(&drvdata->spinlock);
	return ret;
}

static void etm_disable_hw(void *info)
{
	int i;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Program trace enable to low by using an always-false event */
	etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);

	/* Read back sequencer and counters for post trace analysis */
	drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

	for (i = 0; i < drvdata->nr_cntr; i++)
		drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}

static void etm_disable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking the hotplug lock here protects against clocks getting
	 * disabled with tracing left on (crash scenario) if a user disable
	 * occurs after the cpu online mask indicates the cpu is offline but
	 * before the DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when the cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}

static const struct coresight_ops_source etm_source_ops = {
	.cpu_id		= etm_cpu_id,
	.trace_id	= etm_trace_id,
	.enable		= etm_enable,
	.disable	= etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
	.source_ops	= &etm_source_ops,
};

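/*
 * CPU hotplug handling: unlock and re-program the ETM when its CPU comes
 * online (CPU_STARTING/CPU_ONLINE) and disable the hardware when the CPU
 * goes down (CPU_DYING), keeping driver state consistent across hotplug.
 */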
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
			    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		if (etmdrvdata[cpu]->enable)
			etm_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm_cpu_notifier = {
	.notifier_call = etm_cpu_callback,
};

static bool etm_arch_supported(u8 arch)
{
	switch (arch) {
	case ETM_ARCH_V3_3:
		break;
	case ETM_ARCH_V3_5:
		break;
	case PFT_ARCH_V1_0:
		break;
	case PFT_ARCH_V1_1:
		break;
	default:
		return false;
	}
	return true;
}

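/*
 * Read the ETM ID and configuration code registers to discover the
 * capabilities of this implementation (number of address/context ID
 * comparators, counters and external inputs/outputs).  Called via
 * smp_call_function_single() on the CPU the ETM is affine to.
 */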
static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear the power down bit since, when this bit is set, writes to
	 * certain registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set the prog bit.  It will be set from reset but this is included
	 * to ensure it is set.
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}

static void etm_init_default_data(struct etm_drvdata *drvdata)
{
	/*
	 * A trace ID of value 0 is invalid, so let's start at some
	 * random value that fits in 7 bits and will be just as good.
	 */
	static int etm3x_traceid = 0x10;

	u32 flags = (1 << 0 |  /* instruction execute */
		     3 << 3 |  /* ARM instruction */
		     0 << 5 |  /* No data value comparison */
		     0 << 7 |  /* No exact match */
		     0 << 8 |  /* Ignore context ID */
		     0 << 10); /* Security ignored */

	/*
	 * Initial configuration only: this guarantees that sources handled
	 * by this driver have a unique ID at startup time, but not across
	 * all other types of sources.  For that we lean on the core
	 * framework.
	 */
	drvdata->traceid = etm3x_traceid++;
	drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
	drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
	if (drvdata->nr_addr_cmp >= 2) {
		drvdata->addr_val[0] = (u32) _stext;
		drvdata->addr_val[1] = (u32) _etext;
		drvdata->addr_acctype[0] = flags;
		drvdata->addr_acctype[1] = flags;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	etm_set_default(drvdata);
}

static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		adev->dev.platform_data = pdata;
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity of the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm_count++)
		register_hotcpu_notifier(&etm_cpu_notifier);

	put_online_cpus();

	if (!etm_arch_supported(drvdata->arch)) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm_init_default_data(drvdata);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);
	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);
	return ret;
}

#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etm_runtime_resume(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};

static struct amba_id etm_ids[] = {
	{	/* ETM 3.3 */
		.id	= 0x0003b921,
		.mask	= 0x0003ffff,
		.data	= "ETM 3.3",
	},
	{	/* ETM 3.5 */
		.id	= 0x0003b956,
		.mask	= 0x0003ffff,
		.data	= "ETM 3.5",
	},
	{	/* PTM 1.0 */
		.id	= 0x0003b950,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.0",
	},
	{	/* PTM 1.1 */
		.id	= 0x0003b95f,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.1",
	},
	{	/* PTM 1.1 Qualcomm */
		.id	= 0x0003006f,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.1",
	},
	{ 0, 0},
};

static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.owner	= THIS_MODULE,
		.pm	= &etm_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etm_probe,
	.id_table	= etm_ids,
};

module_amba_driver(etm_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");