/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL		(3 << 24)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
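
/*
 * Worked example (illustrative): with max_n_shift == 8, bits [7:0] of
 * prod/cons hold the queue index, bit 8 is the wrap flag and bit 31 is
 * the overflow flag, so for p == 0x105:
 *
 *	Q_IDX(q, p) == 0x05	(entry 5)
 *	Q_WRP(q, p) == 0x100	(odd number of wraps so far)
 *	Q_OVF(q, p) == 0	(no overflow reported)
 */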

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
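
/*
 * Worked example (illustrative): with STRTAB_SPLIT == 8, SID 0x1234
 * selects L1 descriptor 0x12 (SID >> STRTAB_SPLIT), which spans STEs
 * 0x1200-0x12ff; the low 8 bits (0x34) then index the STE within that
 * lazily allocated L2 table.
 */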

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
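
/*
 * For example, ARM_SMMU_TCR2CD(tcr, TG0) expands to:
 *
 *	(((tcr) >> 14 & 0x3UL) << 6)
 *
 * i.e. the TG0 field is moved from its AArch64 TCR position (bit 14)
 * to its position in CD dword 0 (bit 6).
 */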

/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8
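
/*
 * Each command is 16 bytes (CMDQ_ENT_DWORDS == 2), with the opcode in
 * bits [7:0] of dword 0; the remaining bits carry the per-opcode
 * payload, packed by arm_smmu_cmdq_build_cmd() below.
 */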

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_OF			(1UL << 57)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

	u32				features;
#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)

	u32				options;
#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};

/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}
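
/*
 * A note on the wrap flag (illustrative): with prod == cons alone, a
 * completely full queue and a completely empty one look identical. The
 * extra wrap bit disambiguates the two:
 *
 *	empty: index equal, wrap equal   (consumer has caught up)
 *	full:  index equal, wrap differs (producer has lapped the consumer)
 */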

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
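
/*
 * Example encoding (illustrative): a CMDQ_OP_TLBI_NH_VA for ASID 1 and
 * VA 0x2000, leaf entries only, comes out of the builder above as:
 *
 *	cmd[0] = 0x12 | (1ULL << CMDQ_TLBI_0_ASID_SHIFT);
 *	cmd[1] = 0x2000 | CMDQ_TLBI_1_LEAF;
 */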

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		cerror_str[idx < ARRAY_SIZE(cerror_str) ? idx : 0]);

	switch (idx) {
	case CMDQ_ERR_CERROR_ILL_IDX:
		break;
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u32 until;
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
		/*
		 * Keep the queue locked, otherwise the producer could wrap
		 * twice and we could see a future consumer pointer that looks
		 * like it's behind us.
		 */
		if (queue_poll_cons(q, until, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}
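
/*
 * Typical usage: queue one or more commands, then a CMD_SYNC to wait
 * for them to drain, as arm_smmu_device_reset() does when invalidating
 * cached configuration:
 *
 *	cmd.opcode = CMDQ_OP_CFGI_ALL;
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 *	cmd.opcode = CMDQ_OP_CMD_SYNC;
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 */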

/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
			 STRTAB_STE_1_S1STALLD |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

		dev_info(smmu->dev, "event 0x%02x received:\n", id);
		for (i = 0; i < ARRAY_SIZE(evt); ++i)
			dev_info(smmu->dev, "\t0x%016llx\n",
				 (unsigned long long)evt[i]);
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;

	/*
	 * Not much we can do on overflow, so scream and pretend we're
	 * handling it.
	 */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u32 sid, ssid;
		u16 grpid;
		bool ssv, last;

		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
		ssv = evt[0] & PRIQ_0_SSID_V;
		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
		last = evt[0] & PRIQ_0_PRG_LAST;
		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

		dev_info(smmu->dev, "unexpected PRI request received:\n");
		dev_info(smmu->dev,
			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
			 sid, ssid, grpid, last ? "L" : "",
			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

		if (last) {
			struct arm_smmu_cmdq_ent cmd = {
				.opcode			= CMDQ_OP_PRI_RESP,
				.substream_valid	= ssv,
				.pri			= {
					.sid	= sid,
					.ssid	= ssid,
					.grpid	= grpid,
					.resp	= PRI_RESP_DENY,
				},
			};

			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		}
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;

	/* PRIQ overflow indicates a programming error */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR) {
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
		arm_smmu_priq_handler(irq, smmu->dev);
	}

	if (active & GERROR_MSI_EVTQ_ABT_ERR) {
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
		arm_smmu_evtq_handler(irq, smmu->dev);
	}

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
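
/*
 * These callbacks are invoked from the io-pgtable code: unmapping a
 * range triggers tlb_add_flush() for the affected IOVAs followed by
 * tlb_sync() once the page tables are consistent. For a stage-1 domain
 * the resulting command stream is (sketch):
 *
 *	CMD_TLBI_NH_VA(asid, iova)	<- arm_smmu_tlb_inv_range_nosync()
 *	CMD_SYNC			<- arm_smmu_tlb_sync()
 */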

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dma_free_coherent(smmu_domain->smmu->dev,
					  CTXDESC_CD_DWORDS << 3,
					  cfg->cdptr,
					  cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (IS_ERR_VALUE(asid))
		return asid;

	cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma, GFP_KERNEL);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (IS_ERR_VALUE(vmid))
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static struct iommu_ops arm_smmu_ops;

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (IS_ERR_VALUE(ret))
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}

static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		return -EEXIST;

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;
	smmu_group->ste.bypass	= false;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (IS_ERR_VALUE(ret))
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	BUG_ON(!smmu_domain);
	BUG_ON(!smmu_group);

	mutex_lock(&smmu_domain->init_mutex);
	BUG_ON(smmu_group->domain != smmu_domain);

	smmu_group->ste.bypass = true;
	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
	mutex_unlock(&smmu_domain->init_mutex);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct platform_device *smmu_pdev;
	struct arm_smmu_device *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	smmu_pdev = of_find_device_by_node(of_node);
	if (smmu_pdev)
		smmu = platform_get_drvdata(smmu_pdev);

	of_node_put(of_node);
	return smmu;
}
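
/*
 * This lookup assumes firmware along the lines of the following purely
 * illustrative devicetree fragment, in which the PCI host controller
 * carries an "iommus" phandle pointing at the SMMU node:
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		...
 *	};
 *
 *	pcie@40000000 {
 *		...
 *		iommus = <&smmu>;
 *	};
 */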

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_put_group;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			return 0;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_put_group;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_put_group;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_put_group;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;
	return 0;

out_put_group:
	iommu_group_put(group);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= pci_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
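
/*
 * Sizing example (illustrative): the command queue with max_n_shift == 8
 * and CMDQ_ENT_DWORDS == 2 allocates ((1 << 8) * 2) << 3 == 4KiB of DMA
 * memory, with Q_BASE encoding both the base address and the log2 queue
 * size for the hardware.
 */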

static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
				    struct arm_smmu_queue *q)
{
	size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;

	dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
}

static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
{
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_free_one_queue(smmu, &smmu->priq.q);
}

static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		goto out_free_cmdq;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				      ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
	if (ret)
		goto out_free_evtq;

	return 0;

out_free_evtq:
	arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
out_free_cmdq:
	arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
	return ret;
}

static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
{
	int i;
	size_t size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	for (i = 0; i < cfg->num_l1_ents; ++i) {
		struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];

		if (!desc->l2ptr)
			continue;

		dma_free_coherent(smmu->dev, size, desc->l2ptr,
				  desc->l2ptr_dma);
	}
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	int ret;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	ret = arm_smmu_init_l1_strtab(smmu);
	if (ret)
		dma_free_coherent(smmu->dev,
				  l1size,
				  strtab,
				  cfg->strtab_dma);
	return ret;
}
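
/*
 * Sizing example (illustrative): for sid_bits == 16, the L1 size above
 * is min(STRTAB_L1_SZ_SHIFT - 3, 16 - STRTAB_SPLIT) == 8, i.e. 256 L1
 * descriptors (2KiB), each pointing (lazily) at an L2 table of 256 STEs,
 * which together cover the full 16-bit SID space.
 */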

static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
{
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	u32 size = cfg->num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		arm_smmu_free_l2_strtab(smmu);
		size *= STRTAB_L1_DESC_DWORDS << 3;
	} else {
		size *= STRTAB_STE_DWORDS << 3;
	}

	dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_init_strtab(smmu);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_queues:
	arm_smmu_free_queues(smmu);
	return ret;
}

static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
{
	arm_smmu_free_strtab(smmu);
	arm_smmu_free_queues(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_evtq_handler,
						arm_smmu_evtq_thread,
						0, "arm-smmu-v3-evtq", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq,
							arm_smmu_priq_handler,
							arm_smmu_priq_thread,
							0, "arm-smmu-v3-priq",
							smmu);
			if (IS_ERR_VALUE(ret))
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
2348 static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
2352 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
2354 dev_err(smmu->dev, "failed to clear cr0\n");
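
/*
 * One-shot bring-up of the device. The ordering below matters: the SMMU
 * must be disabled while the table/queue attributes and base registers
 * are programmed, the command queue has to be running before any
 * invalidation commands can be issued, and SMMUEN is set last, once the
 * stream table, queues and interrupts are all in place.
 */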
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
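
/*
 * Interrogate the ID registers and cross-check them against what the
 * driver supports. IDR0 yields the feature set, IDR1 the queue and
 * StreamID/SubstreamID sizes, and IDR5 the supported page sizes and
 * output address width. Unsupported configurations fail with -ENXIO.
 */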
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;
	unsigned long pgsize_bitmap = 0;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	if (reg & IDR0_STALL_MODEL)
		smmu->features |= ARM_SMMU_FEAT_STALLS;

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);

	return 0;
}
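
/*
 * Device-tree probe: map the register file, collect the named wired
 * interrupts, then discover features, allocate the in-memory tables and
 * queues, and finally reset the device into a known working state.
 */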
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu);
	if (ret)
		goto out_free_structures;

	return 0;

out_free_structures:
	arm_smmu_free_structures(smmu);
	return ret;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	arm_smmu_free_structures(smmu);
	return 0;
}
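
/*
 * Driver registration. Registering at subsys_initcall time (rather than
 * as a normal module init) is presumably intended to get the SMMU up
 * before the devices it translates for begin probing on the PCI bus.
 */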
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;
	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");