/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>
#include <mach/smmu.h>
#include <mach/tegra-ahb.h>
/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1
/* REVISIT: To support multiple MCs */
enum {
	_MC = 0,
};

enum {
	_TLB = 0,
	_PTC,
};
#define SMMU_CACHE_CONFIG_BASE			0x14
#define __SMMU_CACHE_CONFIG(mc, cache)		(SMMU_CACHE_CONFIG_BASE + 4 * cache)
#define SMMU_CACHE_CONFIG(cache)		__SMMU_CACHE_CONFIG(_MC, cache)

#define SMMU_CACHE_CONFIG_STATS_SHIFT		31
#define SMMU_CACHE_CONFIG_STATS_ENABLE		(1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT	30
#define SMMU_CACHE_CONFIG_STATS_TEST		(1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)

#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_CACHE_COUNT_BASE		0x1f0

#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss)		\
	(SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT		28

#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |	\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
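/*
 * Worked example (illustrative arithmetic only): for iova 0x12345678,
 *   SMMU_TLB_FLUSH_VA(0x12345678, SECTION)
 *     = ((0x12345678 & 0xffc00000) >> 12) | SMMU_TLB_FLUSH_VA_MATCH_SECTION
 *     = 0x12000 | 2 = 0x00012002
 * i.e. the 4MB-section bits of the VA shifted into place, OR'd with the
 * SECTION match code.  Callers still OR in the ASID-match fields before
 * writing SMMU_TLB_FLUSH (see flush_ptc_and_tlb() below).
 */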
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH_disable		\
		(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE		\
		(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)

#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK	((1 << SMMU_PAGE_SHIFT) - 1)
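/*
 * Note: unlike the kernel's PAGE_MASK, SMMU_PAGE_MASK masks the low-order
 * offset bits (0xfff); "size & SMMU_PAGE_MASK" is therefore non-zero when
 * size is not page-aligned (see the check in tegra_smmu_probe()).
 */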
#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)
#define _READABLE	(1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE	(1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE	(1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT	(1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDIR_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N	(_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn)	(((pdn) << 10) | _PDE_ATTR)

#define _PTE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr)	(((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)	\
		((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)		\
		(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)		\
		pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)	(unsigned long)((pfn) | (attr))

#define SMMU_ASID_ENABLE(asid)	((asid) | (1 << 31))
#define SMMU_ASID_DISABLE	0
#define SMMU_ASID_ASID(n)	((n) & ~SMMU_ASID_ENABLE(0))
#define NUM_SMMU_REG_BANKS	3

#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)

#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID
static const u32 smmu_hwgrp_asid_reg[] = {
	HWGRP_INIT(AFI),
	HWGRP_INIT(AVPC),
	HWGRP_INIT(DC),
	HWGRP_INIT(DCB),
	HWGRP_INIT(EPP),
	HWGRP_INIT(G2),
	HWGRP_INIT(HC),
	HWGRP_INIT(HDA),
	HWGRP_INIT(ISP),
	HWGRP_INIT(MPE),
	HWGRP_INIT(NV),
	HWGRP_INIT(NV2),
	HWGRP_INIT(PPCS),
	HWGRP_INIT(SATA),
	HWGRP_INIT(VDE),
	HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])
/*
 * Per client for address space
 */
struct smmu_client {
	struct device		*dev;
	struct list_head	list;
	struct smmu_as		*as;
	u32			hwgrp;
};

/*
 * Per address space
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;
	spinlock_t		lock;	/* for pagetable */
	struct page		*pdir_page;
	unsigned long		pdir_attr;
	unsigned long		pde_attr;
	unsigned long		pte_attr;
	unsigned int		*pte_count;

	struct list_head	client;
	spinlock_t		client_lock; /* for client list */
};

struct smmu_debugfs_info {
	struct smmu_device *smmu;
	int mc;
	int cache;
};

/*
 * Per SMMU device - IOMMU device
 */
struct smmu_device {
	void __iomem	*regs[NUM_SMMU_REG_BANKS];
	unsigned long	iovmm_base;	/* remappable base address */
	unsigned long	page_count;	/* total remappable size */
	spinlock_t	lock;
	char		*name;
	struct device	*dev;
	struct page *avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Register image savers for suspend/resume
	 */
	unsigned long translation_enable_0;
	unsigned long translation_enable_1;
	unsigned long translation_enable_2;
	unsigned long asid_security;

	struct dentry *debugfs_root;
	struct smmu_debugfs_info *debugfs_info;

	struct device_node *ahb;

	int		num_as;
	struct smmu_as	as[0];		/* Run-time allocated array */
};

static struct smmu_device *smmu_handle; /* unique for a system */
287 * SMMU register accessors
289 static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
293 return readl(smmu->regs[0] + offs - 0x10);
294 BUG_ON(offs < 0x1f0);
296 return readl(smmu->regs[1] + offs - 0x1f0);
297 BUG_ON(offs < 0x228);
299 return readl(smmu->regs[2] + offs - 0x228);
303 static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
307 writel(val, smmu->regs[0] + offs - 0x10);
310 BUG_ON(offs < 0x1f0);
312 writel(val, smmu->regs[1] + offs - 0x1f0);
315 BUG_ON(offs < 0x228);
317 writel(val, smmu->regs[2] + offs - 0x228);
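/*
 * Illustration: given the bank bounds checked above, the three register
 * banks cover offsets 0x10-0x3e, 0x1f0-0x1ff and 0x228-0x283, so e.g.
 * smmu_read(smmu, SMMU_CONFIG) resolves to
 * readl(smmu->regs[0] + 0x10 - 0x10), the first word of bank 0, while
 * SMMU_TRANSLATION_ENABLE_0 (0x228) lands at the start of bank 2.
 */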
#define VA_PAGE_TO_PA(va, page)	\
	(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)	\
	do {	\
		unsigned long _pa_ = VA_PAGE_TO_PA(va, page);		\
		__cpuc_flush_dcache_area((void *)(va), (size_t)(size));	\
		outer_flush_range(_pa_, _pa_+(size_t)(size));		\
	} while (0)

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)	smmu_read(smmu, SMMU_CONFIG)

#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data)
static int __smmu_client_set_hwgrp(struct smmu_client *c,
				   unsigned long map, int on)
{
	int i;
	struct smmu_as *as = c->as;
	u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
	struct smmu_device *smmu = as->smmu;

	WARN_ON(!on && map);
	if (on && !map)
		return -EINVAL;
	if (!on)
		map = smmu_client_hwgrp(c);

	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		if (on) {
			if (WARN_ON(val & mask))
				goto err_hw_busy;
			val |= mask;
		} else {
			WARN_ON((val & mask) == mask);
			val &= ~mask;
		}
		smmu_write(smmu, val, offs);
	}
	FLUSH_SMMU_REGS(smmu);
	c->hwgrp = map;
	return 0;

err_hw_busy:
	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		val &= ~mask;
		smmu_write(smmu, val, offs);
	}
	return -EBUSY;
}
static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
	int val;
	unsigned long flags;
	struct smmu_as *as = c->as;
	struct smmu_device *smmu = as->smmu;

	spin_lock_irqsave(&smmu->lock, flags);
	val = __smmu_client_set_hwgrp(c, map, on);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return val;
}
/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	u32 val;

	smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH_disable;
	smmu_write(smmu, val, SMMU_TLB_FLUSH);

	if (enable)
		smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	FLUSH_SMMU_REGS(smmu);
}
static int smmu_setup_regs(struct smmu_device *smmu)
{
	int i;
	u32 val;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];
		struct smmu_client *c;

		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		val = as->pdir_page ?
			SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
			SMMU_PTB_DATA_RESET_VAL;
		smmu_write(smmu, val, SMMU_PTB_DATA);

		list_for_each_entry(c, &as->client, list)
			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
	}

	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
	smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
	smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
	smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
	smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
	smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));

	smmu_flush_regs(smmu, 1);

	return tegra_ahb_enable_smmu(smmu->ahb);
}
static void flush_ptc_and_tlb(struct smmu_device *smmu,
			      struct smmu_as *as, dma_addr_t iova,
			      unsigned long *pte, struct page *page, int is_pde)
{
	u32 val;
	unsigned long tlb_flush_va = is_pde
		? SMMU_TLB_FLUSH_VA(iova, SECTION)
		: SMMU_TLB_FLUSH_VA(iova, GROUP);

	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = tlb_flush_va |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(smmu);
}
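/*
 * Note on ordering: the PTC entry that caches the PTE is invalidated
 * first, then the TLB entry derived from it; each write is followed by a
 * FLUSH_SMMU_REGS() read-back so the first invalidation completes before
 * the second is issued (see the PPSB barrier comment above).
 */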
static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
}
static void free_pdir(struct smmu_as *as)
{
	unsigned addr;
	int count;
	struct device *dev = as->smmu->dev;

	if (!as->pdir_page)
		return;

	addr = as->smmu->iovmm_base;
	count = as->smmu->page_count;
	while (count-- > 0) {
		free_ptbl(as, addr);
		addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
	}
	ClearPageReserved(as->pdir_page);
	__free_page(as->pdir_page);
	as->pdir_page = NULL;
	devm_kfree(dev, as->pte_count);
	as->pte_count = NULL;
}
/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 */
static unsigned long *locate_pte(struct smmu_as *as,
				 dma_addr_t iova, bool allocate,
				 struct page **ptbl_page_p,
				 unsigned int **count)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = page_address(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = page_address(*ptbl_page_p);
	} else if (!allocate) {
		return NULL;
	} else {
		int pn;
		unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

		/* Vacant - allocate a new page table */
		dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

		*ptbl_page_p = alloc_page(GFP_ATOMIC);
		if (!*ptbl_page_p) {
			dev_err(as->smmu->dev,
				"failed to allocate smmu_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)page_address(*ptbl_page_p);
		for (pn = 0; pn < SMMU_PTBL_COUNT;
		     pn++, addr += SMMU_PAGE_SIZE) {
			ptbl[pn] = _PTE_VACANT(addr);
		}
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
					as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
	*count = &as->pte_count[pdn];

	return &ptbl[ptn % SMMU_PTBL_COUNT];
}
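/*
 * Worked example (illustrative values): with 4KB pages and 1024-entry
 * tables, iova 0x12345678 decomposes as
 *   pdn = iova >> 22           = 0x48    (page-directory index)
 *   ptn = iova >> 12           = 0x12345 (global page number)
 *   ptn % SMMU_PTBL_COUNT      = 0x345   (index within the page table)
 * so locate_pte() returns &ptbl[0x345] of the table referenced by
 * pdir[0x48].
 */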
#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
			  dma_addr_t iova, unsigned long pfn)
{
	struct page *page;
	unsigned long *vaddr;

	page = pfn_to_page(pfn);
	vaddr = page_address(page);
	if (!vaddr)
		return;

	vaddr[0] = iova;
	vaddr[1] = pfn << PAGE_SHIFT;
	FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
				 unsigned long addr, unsigned long pfn)
{
}
#endif
/*
 * Caller must not hold as->lock
 */
static int alloc_pdir(struct smmu_as *as)
{
	unsigned long *pdir, flags;
	int pdn, err = 0;
	u32 val;
	struct smmu_device *smmu = as->smmu;
	struct page *page;
	unsigned int *cnt;

	/*
	 * do the allocation, then grab as->lock
	 */
	cnt = devm_kzalloc(smmu->dev,
			   sizeof(cnt[0]) * SMMU_PDIR_COUNT,
			   GFP_KERNEL);
	page = alloc_page(GFP_KERNEL | __GFP_DMA);

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		/* We raced, free the redundant */
		err = -EAGAIN;
		goto err_out;
	}

	if (!page || !cnt) {
		dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
		err = -ENOMEM;
		goto err_out;
	}

	as->pdir_page = page;
	as->pte_count = cnt;

	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	spin_unlock_irqrestore(&as->lock, flags);

	return 0;

err_out:
	spin_unlock_irqrestore(&as->lock, flags);

	devm_kfree(smmu->dev, cnt);
	if (page)
		__free_page(page);
	return err;
}
static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count)) {
		free_ptbl(as, iova);
		smmu_flush_regs(as->smmu, 0);
	}
}
static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}
static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}
static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}
static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}
static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}
static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map)
		return -EINVAL;

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}
static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			c->as = NULL;
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(c->dev));
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i, err = -ENODEV;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		as = &smmu->as[i];
		if (!as->pdir_page) {
			err = alloc_pdir(as);
			if (!err)
				goto found;
		}
		if (err != -EAGAIN)
			break;
	}
	if (i == smmu->num_as)
		dev_err(smmu->dev, "no free AS\n");
	return err;

found:
	spin_lock_irqsave(&smmu->lock, flags);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock_irqrestore(&smmu->lock, flags);

	domain->priv = as;

	domain->geometry.aperture_start = smmu->iovmm_base;
	domain->geometry.aperture_end   = smmu->iovmm_base +
		smmu->page_count * SMMU_PAGE_SIZE - 1;
	domain->geometry.force_aperture = true;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);

	return 0;
}
static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	if (!list_empty(&as->client)) {
		struct smmu_client *c;

		list_for_each_entry(c, &as->client, list)
			smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}
static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};
/* Should be in the order of enum */
static const char * const smmu_debugfs_mc[] = { "mc", };
static const char * const smmu_debugfs_cache[] = {  "tlb", "ptc", };
static ssize_t smmu_debugfs_stats_write(struct file *file,
					const char __user *buffer,
					size_t count, loff_t *pos)
{
	struct smmu_debugfs_info *info;
	struct smmu_device *smmu;
	struct dentry *dent;
	int i;
	enum {
		_OFF = 0,
		_ON,
		_RESET,
	};
	const char * const command[] = {
		[_OFF]		= "off",
		[_ON]		= "on",
		[_RESET]	= "reset",
	};
	char str[] = "reset";
	u32 val;
	size_t offs;

	count = min_t(size_t, count, sizeof(str));
	if (copy_from_user(str, buffer, count))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(command); i++)
		if (strncmp(str, command[i],
			    strlen(command[i])) == 0)
			break;

	if (i == ARRAY_SIZE(command))
		return -EINVAL;

	dent = file->f_dentry;
	info = dent->d_inode->i_private;
	smmu = info->smmu;

	offs = SMMU_CACHE_CONFIG(info->cache);
	val = smmu_read(smmu, offs);
	switch (i) {
	case _OFF:
		val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
		val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		break;
	case _ON:
		val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
		val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		break;
	case _RESET:
		val |= SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		break;
	default:
		BUG();
		break;
	}

	dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
		val, smmu_read(smmu, offs), offs);

	return count;
}
static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
{
	struct smmu_debugfs_info *info;
	struct smmu_device *smmu;
	struct dentry *dent;
	int i;
	const char * const stats[] = { "hit", "miss", };

	dent = d_find_alias(s->private);
	info = dent->d_inode->i_private;
	smmu = info->smmu;

	for (i = 0; i < ARRAY_SIZE(stats); i++) {
		u32 val;
		size_t offs;

		offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
		val = smmu_read(smmu, offs);
		seq_printf(s, "%s:%08x ", stats[i], val);

		dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
			stats[i], val, offs);
	}
	seq_printf(s, "\n");
	dput(dent);

	return 0;
}
static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, smmu_debugfs_stats_show, inode);
}

static const struct file_operations smmu_debugfs_stats_fops = {
	.open		= smmu_debugfs_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= smmu_debugfs_stats_write,
};
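/*
 * Usage sketch (the path component is illustrative; the directory name
 * comes from dev_name(smmu->dev), see smmu_debugfs_create() below):
 *
 *   echo on    > /sys/kernel/debug/<smmu>/mc/tlb   # enable hit/miss counters
 *   echo reset > /sys/kernel/debug/<smmu>/mc/tlb   # clear the counters
 *   cat          /sys/kernel/debug/<smmu>/mc/tlb   # "hit:XXXXXXXX miss:XXXXXXXX"
 */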
static void smmu_debugfs_delete(struct smmu_device *smmu)
{
	debugfs_remove_recursive(smmu->debugfs_root);
	kfree(smmu->debugfs_info);
}
static void smmu_debugfs_create(struct smmu_device *smmu)
{
	int i;
	size_t bytes;
	struct dentry *root;

	bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
		sizeof(*smmu->debugfs_info);
	smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
	if (!smmu->debugfs_info)
		return;

	root = debugfs_create_dir(dev_name(smmu->dev), NULL);
	if (!root)
		goto err_out;
	smmu->debugfs_root = root;

	for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
		int j;
		struct dentry *mc;

		mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
		if (!mc)
			goto err_out;

		for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
			struct dentry *cache;
			struct smmu_debugfs_info *info;

			info = smmu->debugfs_info;
			/* row-major: i selects the MC, j the cache */
			info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
			info->smmu = smmu;
			info->mc = i;
			info->cache = j;

			cache = debugfs_create_file(smmu_debugfs_cache[j],
						    S_IWUGO | S_IRUGO, mc,
						    (void *)info,
						    &smmu_debugfs_stats_fops);
			if (!cache)
				goto err_out;
		}
	}

	return;

err_out:
	smmu_debugfs_delete(smmu);
}
static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&smmu->lock, flags);
	err = smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return err;
}
static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int i, asids, err = 0;
	dma_addr_t uninitialized_var(base);
	size_t bytes, uninitialized_var(size);

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

	if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
		return -ENODEV;

	bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
	smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate smmu_device\n");
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			return -ENODEV;
		smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
		if (!smmu->regs[i])
			return -EBUSY;
	}

	err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
	if (err)
		return -ENODEV;

	if (size & SMMU_PAGE_MASK)
		return -EINVAL;

	size >>= SMMU_PAGE_SHIFT;
	if (!size)
		return -EINVAL;

	smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
	if (!smmu->ahb)
		return -ENODEV;

	smmu->dev = dev;
	smmu->num_as = asids;
	smmu->iovmm_base = base;
	smmu->page_count = size;

	smmu->translation_enable_0 = ~0;
	smmu->translation_enable_1 = ~0;
	smmu->translation_enable_2 = ~0;
	smmu->asid_security = 0;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];

		as->smmu = smmu;
		as->asid = i;
		as->pdir_attr = _PDIR_ATTR;
		as->pde_attr = _PDE_ATTR;
		as->pte_attr = _PTE_ATTR;

		spin_lock_init(&as->lock);
		spin_lock_init(&as->client_lock);
		INIT_LIST_HEAD(&as->client);
	}
	spin_lock_init(&smmu->lock);
	err = smmu_setup_regs(smmu);
	if (err)
		return err;
	platform_set_drvdata(pdev, smmu);

	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
	if (!smmu->avp_vector_page)
		return -ENOMEM;

	smmu_debugfs_create(smmu);
	smmu_handle = smmu;
	return 0;
}
static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	int i;

	smmu_debugfs_delete(smmu);

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	for (i = 0; i < smmu->num_as; i++)
		free_pdir(&smmu->as[i]);
	__free_page(smmu->avp_vector_page);
	smmu_handle = NULL;
	return 0;
}
const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
	{ .compatible = "nvidia,tegra30-smmu", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
#endif
static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
		.of_match_table = of_match_ptr(tegra_smmu_of_match),
	},
};
static int __devinit tegra_smmu_init(void)
{
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");