/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- 4k and 64k pages, with contiguous pte hints
 *	- Up to 39-bit addressing
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include <asm/pgalloc.h>
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS 8

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS 128

/* Number of VMIDs per SMMU */
#define ARM_SMMU_NUM_VMIDS 256

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)
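/*
 * Note on the layout implied by the two macros above: global register
 * space 0 (GR0) sits at the base of the SMMU's MMIO region and global
 * register space 1 (GR1) one "SMMU page" above it. The SMMU page size
 * (4K or 64K) is discovered from ID1 at probe time, so these offsets
 * cannot be baked in as plain constants.
 */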
/* Page table bits */
#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES 16
#elif PAGE_SIZE == SZ_64K
#define ARM_SMMU_PTE_CONT_ENTRIES 32
#else
#define ARM_SMMU_PTE_CONT_ENTRIES 1
#endif

#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t))
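/*
 * With the values above, a contiguous-hint run covers 16 entries with
 * 4K pages (a 64K region) or 32 entries with 64K pages (a 2M region);
 * the fallback of a single entry effectively disables the hint for any
 * other CPU page size.
 */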
/* Stage 1 */
#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2

/* Stage 2 */
#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6)
#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6)
#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2)
#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2)
#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2)
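/*
 * The two blocks above reflect the long-descriptor formats: stage-1
 * ptes carry AP[2:1] permission bits and an AttrIndx field selecting
 * one of the MAIR0 attribute slots programmed later, whereas stage-2
 * ptes encode HAP read/write permissions and the memory attributes
 * directly in the descriptor.
 */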
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
#define sCR0_VMIDPNE (1 << 11)
#define sCR0_PTM (1 << 12)
#define sCR0_FB (1 << 13)
#define sCR0_BSU_SHIFT 14
#define sCR0_BSU_MASK 0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
#define ARM_SMMU_GR0_ID1 0x24
#define ARM_SMMU_GR0_ID2 0x28
#define ARM_SMMU_GR0_ID3 0x2c
#define ARM_SMMU_GR0_ID4 0x30
#define ARM_SMMU_GR0_ID5 0x34
#define ARM_SMMU_GR0_ID6 0x38
#define ARM_SMMU_GR0_ID7 0x3c
#define ARM_SMMU_GR0_sGFSR 0x48
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
#define ARM_SMMU_GR0_PIDR0 0xfe0
#define ARM_SMMU_GR0_PIDR1 0xfe4
#define ARM_SMMU_GR0_PIDR2 0xfe8

#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
#define ID0_PTFS_SHIFT 24
#define ID0_PTFS_MASK 0x2
#define ID0_PTFS_V8_ONLY 0x2
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff

#define ID1_PAGESIZE (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT 28
#define ID1_NUMPAGENDXB_MASK 7
#define ID1_NUMS2CB_SHIFT 16
#define ID1_NUMS2CB_MASK 0xff
#define ID1_NUMCB_SHIFT 0
#define ID1_NUMCB_MASK 0xff

#define ID2_OAS_SHIFT 4
#define ID2_OAS_MASK 0xf
#define ID2_IAS_SHIFT 0
#define ID2_IAS_MASK 0xf
#define ID2_UBS_SHIFT 8
#define ID2_UBS_MASK 0xf
#define ID2_PTFS_4K (1 << 12)
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)

#define PIDR2_ARCH_SHIFT 4
#define PIDR2_ARCH_MASK 0xf
/* Global TLB invalidation */
#define ARM_SMMU_GR0_STLBIALL 0x60
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
#define ARM_SMMU_GR0_sTLBGSYNC 0x70
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
#define sTLBGSTATUS_GSACTIVE (1 << 0)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
#define SMR_MASK_MASK 0x7fff
#define SMR_ID_SHIFT 0
#define SMR_ID_MASK 0x7fff

#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
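/*
 * Each S2CR decides what happens to the streams that map to it: TRANS
 * sends transactions to the context bank named by the CBNDX field,
 * BYPASS lets them through untranslated and FAULT aborts them. The
 * driver resets every S2CR to BYPASS and only switches an entry to
 * TRANS once a master is attached to a domain.
 */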
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
#define CBAR_TYPE_SHIFT 16
#define CBAR_TYPE_MASK 0x3
#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff

#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n) ((n) * (smmu)->pagesize)
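/*
 * As the two macros above show, the translation context banks occupy
 * the upper half of the SMMU's MMIO region, one SMMU page per context
 * bank: bank n lives at base + (size / 2) + n * pagesize.
 */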
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0_LO 0x20
#define ARM_SMMU_CB_TTBR0_HI 0x24
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR_LO 0x60
#define ARM_SMMU_CB_FAR_HI 0x64
#define ARM_SMMU_CB_FSYNR0 0x68

#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)

#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)

#define TTBCR_EAE (1 << 31)

#define TTBCR_PASIZE_SHIFT 16
#define TTBCR_PASIZE_MASK 0x7

#define TTBCR_TG0_4K (0 << 14)
#define TTBCR_TG0_64K (1 << 14)

#define TTBCR_SH0_SHIFT 12
#define TTBCR_SH0_MASK 0x3
#define TTBCR_SH_NS 0
#define TTBCR_SH_OS 2
#define TTBCR_SH_IS 3

#define TTBCR_ORGN0_SHIFT 10
#define TTBCR_IRGN0_SHIFT 8
#define TTBCR_RGN_MASK 0x3
#define TTBCR_RGN_NC 0
#define TTBCR_RGN_WBWA 1
#define TTBCR_RGN_WT 2
#define TTBCR_RGN_WB 3

#define TTBCR_SL0_SHIFT 6
#define TTBCR_SL0_MASK 0x3
#define TTBCR_SL0_LVL_2 0
#define TTBCR_SL0_LVL_1 1

#define TTBCR_T1SZ_SHIFT 16
#define TTBCR_T0SZ_SHIFT 0
#define TTBCR_SZ_MASK 0xf

#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_MASK 0x7

#define TTBCR2_PASIZE_SHIFT 0
#define TTBCR2_PASIZE_MASK 0x7

/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32 0
#define TTBCR2_ADDR_36 1
#define TTBCR2_ADDR_40 2
#define TTBCR2_ADDR_42 3
#define TTBCR2_ADDR_44 4
#define TTBCR2_ADDR_48 5

#define MAIR_ATTR_SHIFT(n) ((n) << 3)
#define MAIR_ATTR_MASK 0xff
#define MAIR_ATTR_DEVICE 0x04
#define MAIR_ATTR_NC 0x44
#define MAIR_ATTR_WBRWA 0xff
#define MAIR_ATTR_IDX_NC 0
#define MAIR_ATTR_IDX_CACHE 1
#define MAIR_ATTR_IDX_DEV 2
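/*
 * MAIR0 holds four 8-bit attribute slots, hence the shift of n * 8
 * above. The encodings follow the VMSAv8 MAIR format: 0x44 is Normal
 * Inner/Outer Non-cacheable, 0xff is Normal Inner/Outer Write-Back
 * Read/Write-Allocate and 0x04 is Device memory. The IDX values pick
 * which slot a stage-1 pte's AttrIndx field refers to.
 */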
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
#define FSR_UUT (1 << 8)
#define FSR_ASF (1 << 7)
#define FSR_TLBLKF (1 << 6)
#define FSR_TLBMCF (1 << 5)
#define FSR_EF (1 << 4)
#define FSR_PF (1 << 3)
#define FSR_AFF (1 << 2)
#define FSR_TF (1 << 1)
#define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
		 FSR_TLBLKF)
#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT |	\
		   FSR_EF | FSR_PF | FSR_TF)

#define FSYNR0_WNR (1 << 4)
struct arm_smmu_smr {
	u8			idx;
	u16			mask;
	u16			id;
};

struct arm_smmu_master {
	struct device_node	*of_node;

	/*
	 * The following is specific to the master's position in the
	 * SMMU chain.
	 */
	struct rb_node		node;
	int			num_streamids;
	u16			streamids[MAX_MASTER_STREAMIDS];

	/*
	 * We only need to allocate these on the root SMMU, as we
	 * configure unmatched streams to bypass translation.
	 */
	struct arm_smmu_smr	*smrs;
};
struct arm_smmu_device {
	struct device		*dev;
	struct device_node	*parent_of_node;

	void __iomem		*base;
	unsigned long		size;
	unsigned long		pagesize;

#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
	u32			features;
	int			version;

	u32			num_context_banks;
	u32			num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t		irptndx;

	u32			num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long		input_size;
	unsigned long		s1_output_size;
	unsigned long		s2_output_size;

	u32			num_global_irqs;
	u32			num_context_irqs;
	unsigned int		*irqs;

	DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);

	struct list_head	list;
	struct rb_root		masters;
};
struct arm_smmu_cfg {
	struct arm_smmu_device	*smmu;
	u8			vmid;
	u8			cbndx;
	u8			irptndx;
	u32			cbar;
	pgd_t			*pgd;
};

struct arm_smmu_domain {
	/*
	 * A domain can span across multiple, chained SMMUs and requires
	 * all devices within the domain to follow the same translation
	 * path.
	 */
	struct arm_smmu_device	*leaf_smmu;
	struct arm_smmu_cfg	root_cfg;
	phys_addr_t		output_mask;

	spinlock_t		lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this;
		this = container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = masterspec->np;
	master->num_streamids = masterspec->args_count;

	for (i = 0; i < master->num_streamids; ++i)
		master->streamids[i] = masterspec->args[i];

	return insert_smmu_master(smmu, master);
}
static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *parent;

	if (!smmu->parent_of_node)
		return NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list)
		if (parent->dev->of_node == smmu->parent_of_node)
			goto out_unlock;

	parent = NULL;
	dev_warn(smmu->dev,
		 "Failed to find SMMU parent despite parent in DT\n");
out_unlock:
	spin_unlock(&arm_smmu_devices_lock);
	return parent;
}
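/*
 * Bitmap allocator used for context banks, SMRs and VMIDs. The
 * find_next_zero_bit()/test_and_set_bit() pairing below retries the
 * search if another CPU claims the candidate bit between the lookup
 * and the atomic set, so no extra locking is needed around allocation.
 */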
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);

	/* CBAR */
	reg = root_cfg->cbar |
	      (root_cfg->vmid << CBAR_VMID_SHIFT);
	if (smmu->version == 1)
		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/* Use the weakest memory type, so it is overridden by the pte */
	if (stage1)
		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));

	if (smmu->version > 1) {
		/* CBA2R */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg,
			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));

		/* TTBCR2 */
		switch (smmu->input_size) {
		case 32:
			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
			break;
		case 36:
			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
			break;
		case 39:
			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
			break;
		case 42:
			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
			break;
		case 44:
			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
			break;
		case 48:
			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
			break;
		}

		switch (smmu->s1_output_size) {
		case 32:
			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
			break;
		case 36:
			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
			break;
		case 39:
			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
			break;
		case 42:
			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
			break;
		case 44:
			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
			break;
		case 48:
			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
			break;
		}

		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
	}

	/* TTBR0 */
	reg = __pa(root_cfg->pgd);
#ifndef __BIG_ENDIAN
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
#else
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
#endif

	/*
	 * TTBCR
	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
	 */
	if (smmu->version > 1) {
		if (PAGE_SIZE == SZ_4K)
			reg = TTBCR_TG0_4K;
		else
			reg = TTBCR_TG0_64K;

		if (!stage1) {
			switch (smmu->s2_output_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
				break;
			}
		} else {
			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
		}
	} else {
		reg = 0;
	}

	reg |= TTBCR_EAE |
	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIR0 (stage-1 only) */
	if (stage1) {
		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
	}

	/* Nuke the TLB */
	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync(smmu);

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct device *dev)
{
	int irq, ret, start;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu, *parent;

	/*
	 * Walk the SMMU chain to find the root device for this chain.
	 * We assume that no masters have translations which terminate
	 * early, and therefore check that the root SMMU does indeed have
	 * a StreamID for the master in question.
	 */
	parent = dev->archdata.iommu;
	smmu_domain->output_mask = -1;
	do {
		smmu = parent;
		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
	} while ((parent = find_parent_smmu(smmu)));

	if (!find_smmu_master(smmu, dev->of_node)) {
		dev_err(dev, "unable to find root SMMU for device\n");
		return -ENODEV;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
	if (IS_ERR_VALUE(ret))
		return ret;

	root_cfg->vmid = ret;
	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
	} else {
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_free_vmid;

	root_cfg->cbndx = ret;

	if (smmu->version == 1) {
		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		root_cfg->irptndx %= smmu->num_context_irqs;
	} else {
		root_cfg->irptndx = root_cfg->cbndx;
	}

	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			root_cfg->irptndx, irq);
		root_cfg->irptndx = -1;
		goto out_free_context;
	}

	root_cfg->smmu = smmu;
	arm_smmu_init_context_bank(smmu_domain);
	return ret;

out_free_context:
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
out_free_vmid:
	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	int irq;

	if (!smmu)
		return;

	if (root_cfg->irptndx != -1) {
		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
		free_irq(irq, domain);
	}

	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;
	pgd_t *pgd;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return -ENOMEM;

	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!pgd)
		goto out_free_domain;
	smmu_domain->root_cfg.pgd = pgd;

	spin_lock_init(&smmu_domain->lock);
	domain->priv = smmu_domain;
	return 0;

out_free_domain:
	kfree(smmu_domain);
	return -ENOMEM;
}
static void arm_smmu_free_ptes(pmd_t *pmd)
{
	pgtable_t table = pmd_pgtable(*pmd);
	pgtable_page_dtor(table);
	__free_page(table);
}

static void arm_smmu_free_pmds(pud_t *pud)
{
	int i;
	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);

	pmd = pmd_base;
	for (i = 0; i < PTRS_PER_PMD; ++i, ++pmd) {
		if (pmd_none(*pmd))
			continue;

		arm_smmu_free_ptes(pmd);
	}

	pmd_free(NULL, pmd_base);
}

static void arm_smmu_free_puds(pgd_t *pgd)
{
	int i;
	pud_t *pud, *pud_base = pud_offset(pgd, 0);

	pud = pud_base;
	for (i = 0; i < PTRS_PER_PUD; ++i, ++pud) {
		if (pud_none(*pud))
			continue;

		arm_smmu_free_pmds(pud);
	}

	pud_free(NULL, pud_base);
}
static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
{
	int i;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd, *pgd_base = root_cfg->pgd;

	/*
	 * Recursively free the page tables for this domain. We don't
	 * care about speculative TLB filling, because the TLB will be
	 * nuked next time this context bank is re-allocated and no devices
	 * currently map to these tables.
	 */
	pgd = pgd_base;
	for (i = 0; i < PTRS_PER_PGD; ++i, ++pgd) {
		if (pgd_none(*pgd))
			continue;

		arm_smmu_free_puds(pgd);
	}

	kfree(pgd_base);
}

static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	arm_smmu_destroy_domain_context(domain);
	arm_smmu_free_pgtables(smmu_domain);
	kfree(smmu_domain);
}
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master *master)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (master->smrs)
		return -EEXIST;

	smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
			master->num_streamids, master->of_node->name);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the root SMMU */
	for (i = 0; i < master->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= master->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < master->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	master->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master *master)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = master->smrs;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < master->num_streamids; ++i) {
		u8 idx = smrs[i].idx;
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	master->smrs = NULL;
	kfree(smrs);
}

static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
					   struct arm_smmu_master *master)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	for (i = 0; i < master->num_streamids; ++i) {
		u16 sid = master->streamids[i];
		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
	}
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master *master)
{
	int i, ret;
	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	ret = arm_smmu_master_configure_smrs(smmu, master);
	if (ret)
		return ret;

	/* Bypass the leaves */
	smmu = smmu_domain->leaf_smmu;
	while ((parent = find_parent_smmu(smmu))) {
		/*
		 * We won't have a StreamID match for anything but the root
		 * smmu, so we only need to worry about StreamID indexing,
		 * where we must install bypass entries in the S2CRs.
		 */
		if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)
			break;

		arm_smmu_bypass_stream_mapping(smmu, master);
		smmu = parent;
	}

	/* Now we're at the root, time to point at our context bank */
	for (i = 0; i < master->num_streamids; ++i) {
		u32 idx, s2cr;
		idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
		s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master *master)
{
	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	arm_smmu_bypass_stream_mapping(smmu, master);
	arm_smmu_master_free_smrs(smmu, master);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = -EINVAL;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *device_smmu = dev->archdata.iommu;
	struct arm_smmu_master *master;

	if (!device_smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * Sanity check the domain. We don't currently support domains
	 * that cross between different SMMU chains.
	 */
	spin_lock(&smmu_domain->lock);
	if (!smmu_domain->leaf_smmu) {
		/* Now that we have a master, we can finalise the domain */
		ret = arm_smmu_init_domain_context(domain, dev);
		if (IS_ERR_VALUE(ret))
			goto err_unlock;

		smmu_domain->leaf_smmu = device_smmu;
	} else if (smmu_domain->leaf_smmu != device_smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->leaf_smmu->dev),
			dev_name(device_smmu->dev));
		goto err_unlock;
	}
	spin_unlock(&smmu_domain->lock);

	/* Looks ok, so add the device to the domain */
	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	return arm_smmu_domain_add_master(smmu_domain, master);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
	if (master)
		arm_smmu_domain_remove_master(smmu_domain, master);
}
static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
				   size_t size)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/*
	 * If the SMMU can't walk tables in the CPU caches, treat them
	 * like non-coherent DMA since we need to flush the new entries
	 * all the way out to memory. There's no possibility of recursion
	 * here as the SMMU table walker will not be wired through another
	 * SMMU.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
}
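/*
 * Helper for the contiguous hint: returns true when [addr, addr +
 * ARM_SMMU_PTE_CONT_SIZE) is aligned to the contiguous region size and
 * still fits below end, i.e. when a whole run of
 * ARM_SMMU_PTE_CONT_ENTRIES ptes can be written with the hint bit set.
 */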
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
					     unsigned long end)
{
	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
}
static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long pfn, int flags, int stage)
{
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;

	if (pmd_none(*pmd)) {
		/* Allocate a new set of tables */
		pgtable_t table = alloc_page(PGALLOC_GFP);
		if (!table)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, page_address(table),
				       ARM_SMMU_PTE_HWTABLE_SIZE);
		pgtable_page_ctor(table);
		pmd_populate(NULL, pmd, table);
		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
	}

	if (stage == 1) {
		pteval |= ARM_SMMU_PTE_AP_UNPRIV;
		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
			pteval |= ARM_SMMU_PTE_AP_RDONLY;

		if (flags & IOMMU_CACHE)
			pteval |= (MAIR_ATTR_IDX_CACHE <<
				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
	} else {
		pteval |= ARM_SMMU_PTE_HAP_FAULT;
		if (flags & IOMMU_READ)
			pteval |= ARM_SMMU_PTE_HAP_READ;
		if (flags & IOMMU_WRITE)
			pteval |= ARM_SMMU_PTE_HAP_WRITE;
		if (flags & IOMMU_CACHE)
			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
		else
			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
	}

	/* If no access, create a faulting entry to avoid TLB fills */
	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;

	pteval |= ARM_SMMU_PTE_SH_IS;
	start = pmd_page_vaddr(*pmd) + pte_index(addr);
	pte = start;

	/*
	 * Install the page table entries. This is fairly complicated
	 * since we attempt to make use of the contiguous hint in the
	 * ptes where possible. The contiguous hint indicates a series
	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
	 * contiguous region with the following constraints:
	 *
	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
	 *   - Each pte in the region has the contiguous hint bit set
	 *
	 * This complicates unmapping (also handled by this code, when
	 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
	 * possible, yet highly unlikely, that a client may unmap only
	 * part of a contiguous range. This requires clearing of the
	 * contiguous hint bits in the range before installing the new
	 * faulting entries.
	 *
	 * Note that re-mapping an address range without first unmapping
	 * it is not supported, so TLB invalidation is not required here
	 * and is instead performed at unmap and domain-init time.
	 */
	do {
		int i = 1;
		pteval &= ~ARM_SMMU_PTE_CONT;

		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
			i = ARM_SMMU_PTE_CONT_ENTRIES;
			pteval |= ARM_SMMU_PTE_CONT;
		} else if (pte_val(*pte) &
			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
			int j;
			pte_t *cont_start;
			unsigned long idx = pte_index(addr);

			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
			cont_start = pmd_page_vaddr(*pmd) + idx;
			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;

			arm_smmu_flush_pgtable(smmu, cont_start,
					       sizeof(*pte) *
					       ARM_SMMU_PTE_CONT_ENTRIES);
		}

		do {
			*pte = pfn_pte(pfn, __pgprot(pteval));
		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
	} while (addr != end);

	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
	return 0;
}
static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int flags, int stage)
{
	int ret;
	pmd_t *pmd;
	unsigned long next, pfn = __phys_to_pfn(phys);

#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_none(*pud)) {
		pmd = pmd_alloc_one(NULL, addr);
		if (!pmd)
			return -ENOMEM;
	} else
#endif
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
					      flags, stage);
		pud_populate(NULL, pud, pmd);
		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
		phys += next - addr;
	} while (pmd++, addr = next, addr < end);

	return ret;
}
static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int flags, int stage)
{
	int ret;
	pud_t *pud;
	unsigned long next;

#ifndef __PAGETABLE_PUD_FOLDED
	if (pgd_none(*pgd)) {
		pud = pud_alloc_one(NULL, addr);
		if (!pud)
			return -ENOMEM;
	} else
#endif
		pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
					      flags, stage);
		pgd_populate(NULL, pgd, pud);
		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
		phys += next - addr;
	} while (pud++, addr = next, addr < end);

	return ret;
}
static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int flags)
{
	int ret, stage;
	unsigned long end;
	phys_addr_t input_mask, output_mask;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd = root_cfg->pgd;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
		stage = 2;
		output_mask = (1ULL << smmu->s2_output_size) - 1;
	} else {
		stage = 1;
		output_mask = (1ULL << smmu->s1_output_size) - 1;
	}

	if (!pgd)
		return -EINVAL;

	if (size & ~PAGE_MASK)
		return -EINVAL;

	input_mask = (1ULL << smmu->input_size) - 1;
	if ((phys_addr_t)iova & ~input_mask)
		return -ERANGE;

	if (paddr & ~output_mask)
		return -ERANGE;

	spin_lock(&smmu_domain->lock);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
		unsigned long next = pgd_addr_end(iova, end);

		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
					      flags, stage);
		if (ret)
			goto out_unlock;

		paddr += next - iova;
		iova = next;
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock(&smmu_domain->lock);

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		dsb();

	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int flags)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;

	if (!smmu_domain || !smmu)
		return -ENODEV;

	/* Check for silent address truncation up the SMMU chain. */
	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
		return -ERANGE;

	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync(smmu);
	return ret ? 0 : size;
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	spin_lock(&smmu_domain->lock);
	pgd = root_cfg->pgd;
	if (!pgd)
		goto err_unlock;

	pgd += pgd_index(iova);
	if (pgd_none_or_clear_bad(pgd))
		goto err_unlock;

	pud = pud_offset(pgd, iova);
	if (pud_none_or_clear_bad(pud))
		goto err_unlock;

	pmd = pmd_offset(pud, iova);
	if (pmd_none_or_clear_bad(pmd))
		goto err_unlock;

	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
	if (pte_none(*pte))
		goto err_unlock;

	spin_unlock(&smmu_domain->lock);
	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	dev_warn(smmu->dev,
		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
		 (unsigned long long)iova);
	return 0;
}
static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	unsigned long caps = 0;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		caps |= IOMMU_CAP_CACHE_COHERENCY;

	return !!(cap & caps);
}
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *child, *parent, *smmu;
	struct arm_smmu_master *master = NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list) {
		smmu = parent;

		/* Try to find a child of the current SMMU. */
		list_for_each_entry(child, &arm_smmu_devices, list) {
			if (child->parent_of_node == parent->dev->of_node) {
				/* Does the child sit above our master? */
				master = find_smmu_master(child, dev->of_node);
				if (master) {
					smmu = NULL;
					break;
				}
			}
		}

		/* We found some children, so keep searching. */
		if (!smmu) {
			master = NULL;
			continue;
		}

		master = find_smmu_master(smmu, dev->of_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!master)
		return -ENODEV;

	dev->archdata.iommu = smmu;
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
static struct iommu_ops arm_smmu_ops = {
	.domain_init	= arm_smmu_domain_init,
	.domain_destroy	= arm_smmu_domain_destroy,
	.attach_dev	= arm_smmu_attach_dev,
	.detach_dev	= arm_smmu_detach_dev,
	.map		= arm_smmu_map,
	.unmap		= arm_smmu_unmap,
	.iova_to_phys	= arm_smmu_iova_to_phys,
	.domain_has_cap	= arm_smmu_domain_has_cap,
	.add_device	= arm_smmu_add_device,
	.remove_device	= arm_smmu_remove_device,
	.pgsize_bitmap	= (SECTION_SIZE |
			   ARM_SMMU_PTE_CONT_SIZE |
			   PAGE_SIZE),
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	int i;
	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	/* Enable fault reporting */
	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	scr0 &= ~sCR0_FB;

	/* Don't upgrade barriers */
	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	arm_smmu_tlb_sync(smmu);
	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
}
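/*
 * The IAS/OAS/UBS fields in ID2 encode address sizes rather than holding
 * them directly; the helper below converts an encoding to a bit width
 * (0 -> 32, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44, 5 -> 48 bits, mirroring
 * the TTBCR2_ADDR_* values defined earlier).
 */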
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;

	dev_notice(smmu->dev, "probing hardware configuration...\n");

	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
#ifndef CONFIG_64BIT
	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
		return -ENODEV;
	}
#endif
	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
	       ARM_SMMU_FEAT_TRANS_NESTED))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if (id & ID0_CTTW) {
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
		dev_notice(smmu->dev, "\tcoherent table walk\n");
	}

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;

	/* Check that we ioremapped enough */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= (smmu->pagesize << 1);
	if (smmu->size < size)
		dev_warn(smmu->dev,
			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
				     ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);

	/*
	 * Stage-1 output limited by stage-2 input size due to pgd
	 * allocation (PTRS_PER_PGD).
	 */
#ifdef CONFIG_64BIT
	/* Current maximum output size of 39 bits */
	smmu->s1_output_size = min(39UL, size);
#else
	smmu->s1_output_size = min(32UL, size);
#endif

	/* The stage-2 output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);

	if (smmu->version == 1) {
		smmu->input_size = 32;
	} else {
#ifdef CONFIG_64BIT
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		size = min(39, arm_smmu_id_size_to_bits(size));
#else
		size = 32;
#endif
		smmu->input_size = size;

		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
				PAGE_SIZE);
			return -ENODEV;
		}
	}

	dev_notice(smmu->dev,
		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
		   smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
	return 0;
}
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device_node *dev_node;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing base address/size\n");
		return -ENODEV;
	}

	smmu->size = resource_size(res);
	smmu->base = devm_request_and_ioremap(dev, res);
	if (!smmu->base)
		return -EADDRNOTAVAIL;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (num_irqs < smmu->num_global_irqs) {
		dev_warn(dev, "found %d interrupts but expected at least %d\n",
			 num_irqs, smmu->num_global_irqs);
		smmu->num_global_irqs = num_irqs;
	}
	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
		smmu->parent_of_node = dev_node;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_put_parent;

	if (smmu->version > 1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_parent;
	}

	arm_smmu_device_reset(smmu);

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_parent:
	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
#ifdef CONFIG_OF
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", },
	{ .compatible = "arm,smmu-v2", },
	{ .compatible = "arm,mmu-400", },
	{ .compatible = "arm,mmu-500", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#endif
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	int ret;

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);

	return 0;
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

module_init(arm_smmu_init);
module_exit(arm_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");