/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/module.h>
/* headers needed by symbols used below; some may also arrive via rockchip-iommu.h */
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <linux/rockchip-iovmm.h>
#include <linux/rockchip/grf.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/iomap.h>
#include <linux/device.h>
#include "rockchip-iommu.h"
/* We do not consider super-section mapping (16 MB) */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

static void __iomem *rk312x_vop_mmu_base;
enum iommu_entry_flags {
	IOMMU_FLAGS_PRESENT = 0x01,
	IOMMU_FLAGS_READ_PERMISSION = 0x02,
	IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
	IOMMU_FLAGS_OVERRIDE_CACHE = 0x8,
	IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
	IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
	IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
	IOMMU_FLAGS_READ_CACHEABLE = 0x80,
	IOMMU_FLAGS_READ_ALLOCATE = 0x100,
	IOMMU_FLAGS_MASK = 0x1FF,
};
#define rockchip_lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
#define rockchip_lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
#define rockchip_lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
#define rockchip_spage_phys(pent) (*(pent) & SPAGE_MASK)
#define rockchip_spage_offs(iova) ((iova) & 0x0FFF)

#define rockchip_lv1ent_offset(iova) (((iova) >> 22) & 0x03FF)
#define rockchip_lv2ent_offset(iova) (((iova) >> 12) & 0x03FF)
#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define rockchip_lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define rockchip_mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
/* level-2 page entries default to both read and write permission */
#define rockchip_mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
				      IOMMU_FLAGS_READ_PERMISSION | \
				      IOMMU_FLAGS_WRITE_PERMISSION)
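/*
 * Worked example (values are hypothetical): for iova 0x12345678,
 * rockchip_lv1ent_offset() yields (0x12345678 >> 22) & 0x3FF = 0x048,
 * rockchip_lv2ent_offset() yields (0x12345678 >> 12) & 0x3FF = 0x345,
 * and rockchip_spage_offs() yields 0x678. Conversely, for a page at
 * physical address 0x80000000, rockchip_mk_lv2ent_spage() composes
 * the entry 0x80000000 | 0x01 | 0x02 | 0x04 = 0x80000007.
 */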
#define IOMMU_REG_POLL_COUNT_FAST 1000

/**
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
enum iommu_register {
	/**< Current page directory pointer */
	IOMMU_REGISTER_DTE_ADDR = 0x0000,
	/**< Status of the MMU */
	IOMMU_REGISTER_STATUS = 0x0004,
	/**< Command register, used to control the MMU */
	IOMMU_REGISTER_COMMAND = 0x0008,
	/**< Logical address of the last page fault */
	IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
	/**< Used to invalidate the mapping of a single page from the MMU */
	IOMMU_REGISTER_ZAP_ONE_LINE = 0x010,
	/**< Raw interrupt status, all interrupts visible */
	IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
	/**< Indicate to the MMU that the interrupt has been received */
	IOMMU_REGISTER_INT_CLEAR = 0x0018,
	/**< Enable/disable types of interrupts */
	IOMMU_REGISTER_INT_MASK = 0x001C,
	/**< Interrupt status based on the mask */
	IOMMU_REGISTER_INT_STATUS = 0x0020,
	IOMMU_REGISTER_AUTO_GATING = 0x0024
};
enum iommu_command {
	/**< Enable paging (memory translation) */
	IOMMU_COMMAND_ENABLE_PAGING = 0x00,
	/**< Disable paging (memory translation) */
	IOMMU_COMMAND_DISABLE_PAGING = 0x01,
	/**< Enable stall on page fault */
	IOMMU_COMMAND_ENABLE_STALL = 0x02,
	/**< Disable stall on page fault */
	IOMMU_COMMAND_DISABLE_STALL = 0x03,
	/**< Zap the entire page table cache */
	IOMMU_COMMAND_ZAP_CACHE = 0x04,
	/**< Page fault processed */
	IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
	/**< Reset the MMU back to power-on settings */
	IOMMU_COMMAND_HARD_RESET = 0x06
};
/**
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
enum iommu_interrupt {
	IOMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
	IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
};
enum iommu_status_bits {
	IOMMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
	IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
	IOMMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
	IOMMU_STATUS_BIT_IDLE = 1 << 3,
	IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
	IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
	IOMMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
};
/**
 * Size of an MMU page in bytes
 */
#define IOMMU_PAGE_SIZE 0x1000

/**
 * Size of the address space referenced by a page table page
 */
#define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/**
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define IOMMU_PDE_ENTRY(address) (((address) >> 22) & 0x03FF)

/**
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define IOMMU_PTE_ENTRY(address) (((address) >> 12) & 0x03FF)

/**
 * Extract the memory address from a PDE/PTE entry
 */
#define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))
static struct kmem_cache *lv2table_kmem_cache;

static unsigned long *rockchip_section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + rockchip_lv1ent_offset(iova);
}

static unsigned long *rockchip_page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)__va(rockchip_lv2table_base(sent)) +
		rockchip_lv2ent_offset(iova);
}
struct rk_iommu_domain {
	struct list_head clients; /* list of iommu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 4KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
static bool rockchip_set_iommu_active(struct iommu_drvdata *data)
{
	/* return true if the IOMMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool rockchip_set_iommu_inactive(struct iommu_drvdata *data)
{
	/* return true if the IOMMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool rockchip_is_iommu_active(struct iommu_drvdata *data)
{
	return data->activations > 0;
}
static void rockchip_iommu_disable_stall(void __iomem *base)
{
	u32 mmu_status = 0, status = 0;
	int i;

	if (base != rk312x_vop_mmu_base)
		mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
	else
		goto skip_vop_mmu_disable;

	if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
		return;

	if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		pr_info("Aborting MMU disable stall request since it is in pagefault state.\n");
		return;
	}

	if (!(mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE))
		return;

skip_vop_mmu_disable:
	__raw_writel(IOMMU_COMMAND_DISABLE_STALL, base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base)
			status = __raw_readl(base + IOMMU_REGISTER_STATUS);
		if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))
			break;
		if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
			break;
		if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
			break;
	}

	if (IOMMU_REG_POLL_COUNT_FAST == i)
		pr_info("Disable stall request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
}
static bool rockchip_iommu_enable_stall(void __iomem *base)
{
	u32 mmu_status = 0;
	int i;

	if (base != rk312x_vop_mmu_base)
		mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
	else
		goto skip_vop_mmu_enable;

	if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
		return true;

	if (mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) {
		pr_info("MMU stall already enabled\n");
		return true;
	}

	if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		pr_info("Aborting MMU stall request since it is in pagefault state. mmu status is 0x%08x\n",
			mmu_status);
		return false;
	}

skip_vop_mmu_enable:
	__raw_writel(IOMMU_COMMAND_ENABLE_STALL, base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base)
			mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
		if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
			break;
		if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
		    (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))
			break;
		if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))
			break;
	}

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Enable stall request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
		return false;
	}

	if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		pr_info("Aborting MMU stall request since it has a pagefault.\n");
		return false;
	}

	return true;
}
static bool rockchip_iommu_enable_paging(void __iomem *base)
{
	int i;

	__raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
		     base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
			    IOMMU_STATUS_BIT_PAGING_ENABLED)
				break;
		} else {
			break;
		}
	}

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Enable paging request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
		return false;
	}

	return true;
}
static bool rockchip_iommu_disable_paging(void __iomem *base)
{
	int i;

	__raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
		     base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
			      IOMMU_STATUS_BIT_PAGING_ENABLED))
				break;
		} else {
			break;
		}
	}

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Disable paging request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
		return false;
	}

	return true;
}
static void rockchip_iommu_page_fault_done(void __iomem *base, const char *dbgname)
{
	pr_info("MMU: %s: Leaving page fault mode\n",
		dbgname);
	__raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
		     base + IOMMU_REGISTER_COMMAND);
}

static void rockchip_iommu_zap_tlb_without_stall(void __iomem *base)
{
	__raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);
}
static int rockchip_iommu_zap_tlb(void __iomem *base)
{
	if (!rockchip_iommu_enable_stall(base)) {
		pr_err("%s failed\n", __func__);
		return -1;
	}

	__raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);

	rockchip_iommu_disable_stall(base);

	return 0;
}
static inline bool rockchip_iommu_raw_reset(void __iomem *base)
{
	int i;
	unsigned int ret;
	unsigned int grf_value;

	__raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);

	if (base != rk312x_vop_mmu_base) {
		ret = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
		if (!(0xCAFEB000 == ret)) {
			grf_value = readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
			pr_info("error when %s. grf = 0x%08x\n", __func__, grf_value);
			return false;
		}
	}

	__raw_writel(IOMMU_COMMAND_HARD_RESET,
		     base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)
				break;
		} else {
			break;
		}
	}

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("%s, Reset request failed, MMU status is 0x%08X\n",
			__func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));
		return false;
	}

	return true;
}
static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned long pgd)
{
	__raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);
}
static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname)
{
	bool ret;

	ret = rockchip_iommu_raw_reset(base);
	if (!ret) {
		pr_info("(%s), %s failed\n", dbgname, __func__);
		return ret;
	}

	if (base != rk312x_vop_mmu_base)
		__raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
			     IOMMU_INTERRUPT_READ_BUS_ERROR,
			     base + IOMMU_REGISTER_INT_MASK);
	else
		__raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);

	return ret;
}
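/*
 * The page tables are walked by the IOMMU hardware rather than the CPU,
 * so every table update must be cleaned out of the CPU caches (inner
 * cache via dmac_flush_range(), outer cache via outer_flush_range())
 * before the device can observe it. The helper below wraps both steps.
 */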
static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}
static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
{
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rockchip_lv1ent_offset(fault_address);
	pte_index = rockchip_lv2ent_offset(fault_address);
	page_offset = (u32)(fault_address & 0x00000fff);

	mmu_dte_addr = addr_dte;
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!(IOMMU_FLAGS_PRESENT & dte))
		goto print_it;

	pte_addr_phys = ((phys_addr_t)dte & 0xfffff000) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!(IOMMU_FLAGS_PRESENT & pte))
		goto print_it;

	page_addr_phys = ((phys_addr_t)pte & 0xfffff000) + page_offset;
	page_flags = pte & 0x000001fe;

print_it:
	pr_err("iova = %pad: dte_index: 0x%03x pte_index: 0x%03x page_offset: 0x%03x\n",
	       &fault_address, dte_index, pte_index, page_offset);
	pr_err("mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
	       &mmu_dte_addr_phys, &dte_addr_phys, dte,
	       (dte & IOMMU_FLAGS_PRESENT), &pte_addr_phys, pte,
	       (pte & IOMMU_FLAGS_PRESENT), &page_addr_phys, page_flags);
}
static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
{
	/* The System MMU is blocked while the interrupt is being serviced. */
	struct iommu_drvdata *data = dev_id;
	dma_addr_t fault_address = 0;
	unsigned long flags;
	u32 status, rawstat, reg_status;
	int i, ret;

	spin_lock_irqsave(&data->data_lock, flags);

	if (!rockchip_is_iommu_active(data)) {
		spin_unlock_irqrestore(&data->data_lock, flags);
		return IRQ_HANDLED;
	}

	for (i = 0; i < data->num_res_mem; i++) {
		status = __raw_readl(data->res_bases[i] +
				     IOMMU_REGISTER_INT_STATUS);
		if (status == 0)
			continue;

		rawstat = __raw_readl(data->res_bases[i] +
				      IOMMU_REGISTER_INT_RAWSTAT);
		reg_status = __raw_readl(data->res_bases[i] +
					 IOMMU_REGISTER_STATUS);

		dev_info(data->iommu, "1.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
			 rawstat, status, reg_status);

		if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
			u32 dte;
			/* keep 'flags' for the saved IRQ state; use a local for the fault type */
			int fault_flags;

			fault_address = __raw_readl(data->res_bases[i] +
						    IOMMU_REGISTER_PAGE_FAULT_ADDR);
			dte = __raw_readl(data->res_bases[i] +
					  IOMMU_REGISTER_DTE_ADDR);

			fault_flags = (status & IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE) ? 1 : 0;

			dev_err(data->iommu, "Page fault detected at %pad from bus id %d of type %s on %s\n",
				&fault_address, (status >> 6) & 0x1F,
				(fault_flags == 1) ? "write" : "read", data->dbgname);

			dump_pagetbl(fault_address, dte);

			if (data->domain)
				report_iommu_fault(data->domain, data->iommu,
						   fault_address, fault_flags);

			rockchip_iommu_page_fault_done(data->res_bases[i],
						       data->dbgname);
		}

		if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
			dev_err(data->iommu, "bus error occurred at %pad\n",
				&fault_address);
		}

		if (rawstat & ~(IOMMU_INTERRUPT_READ_BUS_ERROR |
				IOMMU_INTERRUPT_PAGE_FAULT)) {
			dev_err(data->iommu, "unexpected int_status: %#08x\n\n",
				rawstat);
		}

		__raw_writel(rawstat, data->res_bases[i] +
			     IOMMU_REGISTER_INT_CLEAR);

		status = __raw_readl(data->res_bases[i] +
				     IOMMU_REGISTER_INT_STATUS);
		rawstat = __raw_readl(data->res_bases[i] +
				      IOMMU_REGISTER_INT_RAWSTAT);
		reg_status = __raw_readl(data->res_bases[i] +
					 IOMMU_REGISTER_STATUS);

		dev_info(data->iommu, "2.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
			 rawstat, status, reg_status);

		ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
		if (ret)
			dev_err(data->iommu, "(%s) %s failed\n", data->dbgname,
				__func__);
	}

	spin_unlock_irqrestore(&data->data_lock, flags);

	return IRQ_HANDLED;
}
static bool rockchip_iommu_disable(struct iommu_drvdata *data)
{
	unsigned long flags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&data->data_lock, flags);

	if (!rockchip_set_iommu_inactive(data)) {
		spin_unlock_irqrestore(&data->data_lock, flags);
		dev_info(data->iommu, "(%s) %d times left to be disabled\n",
			 data->dbgname, data->activations);
		return ret;
	}

	for (i = 0; i < data->num_res_mem; i++) {
		__raw_writel(0, data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
		ret = rockchip_iommu_disable_paging(data->res_bases[i]);
		if (!ret) {
			spin_unlock_irqrestore(&data->data_lock, flags);
			dev_info(data->iommu, "%s error\n", __func__);
			return ret;
		}
	}

	spin_unlock_irqrestore(&data->data_lock, flags);
	dev_info(data->iommu, "(%s) Disabled\n", data->dbgname);
	return ret;
}
/*
 * rockchip_iommu_enable(): enable the System MMU.
 *
 * Returns -error if an error occurred and the System MMU is not enabled,
 * 0 if the System MMU has just been enabled, and 1 if it was already
 * enabled.
 */
static int rockchip_iommu_enable(struct iommu_drvdata *data, unsigned long pgtable)
{
	int i, ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->data_lock, flags);

	if (!rockchip_set_iommu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;	/* errno chosen here; the original value is elided */
			rockchip_set_iommu_inactive(data);
		} else {
			ret = 1;
		}
		spin_unlock_irqrestore(&data->data_lock, flags);
		dev_info(data->iommu, "(%s) Already enabled\n", data->dbgname);
		return ret;
	}

	for (i = 0; i < data->num_res_mem; i++) {
		ret = rockchip_iommu_enable_stall(data->res_bases[i]);
		if (!ret) {
			dev_info(data->iommu, "(%s), %s failed\n",
				 data->dbgname, __func__);
			spin_unlock_irqrestore(&data->data_lock, flags);
			return -EBUSY;
		}

		if (!strstr(data->dbgname, "isp")) {
			if (!rockchip_iommu_reset(data->res_bases[i],
						  data->dbgname)) {
				spin_unlock_irqrestore(&data->data_lock, flags);
				return -ENOENT;
			}
		}

		rockchip_iommu_set_ptbase(data->res_bases[i], pgtable);

		__raw_writel(IOMMU_COMMAND_ZAP_CACHE, data->res_bases[i] +
			     IOMMU_REGISTER_COMMAND);

		__raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
			     IOMMU_INTERRUPT_READ_BUS_ERROR,
			     data->res_bases[i] + IOMMU_REGISTER_INT_MASK);

		ret = rockchip_iommu_enable_paging(data->res_bases[i]);
		if (!ret) {
			spin_unlock_irqrestore(&data->data_lock, flags);
			dev_info(data->iommu, "(%s), %s failed\n",
				 data->dbgname, __func__);
			return -EBUSY;
		}

		rockchip_iommu_disable_stall(data->res_bases[i]);
	}

	data->pgtable = pgtable;
	dev_info(data->iommu, "(%s) Enabled\n", data->dbgname);
	spin_unlock_irqrestore(&data->data_lock, flags);
	return 0;
}
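/*
 * Illustrative caller (hypothetical; mirrors how
 * rockchip_iommu_attach_device() below interprets the return value):
 *
 *	ret = rockchip_iommu_enable(data, __pa(priv->pgtable));
 *	if (ret < 0)		... error, the IOMMU stays disabled
 *	else if (ret == 0)	... the IOMMU was enabled just now
 *	else			... ret == 1, it was already enabled
 */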
int rockchip_iommu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	spin_lock_irqsave(&data->data_lock, flags);

	if (rockchip_is_iommu_active(data)) {
		int i, ret;

		for (i = 0; i < data->num_res_mem; i++) {
			ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
			if (ret) {
				dev_err(dev->archdata.iommu, "(%s) %s failed\n",
					data->dbgname, __func__);
				spin_unlock_irqrestore(&data->data_lock, flags);
				return ret;
			}
		}
	} else {
		dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	spin_unlock_irqrestore(&data->data_lock, flags);
	return 0;
}
static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
					       dma_addr_t iova)
{
	struct rk_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = rockchip_section_entry(priv->pgtable, iova);
	entry = rockchip_page_entry(entry, iova);
	phys = rockchip_spage_phys(entry) + rockchip_spage_offs(iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}
static int rockchip_lv2set_page(unsigned long *pent, phys_addr_t paddr,
				size_t size, short *pgcnt)
{
	if (!rockchip_lv2ent_fault(pent))
		return -EADDRINUSE;

	*pent = rockchip_mk_lv2ent_spage(paddr);
	rockchip_pgtable_flush(pent, pent + 1);
	*pgcnt -= 1;

	return 0;
}
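/*
 * Level-2 tables are allocated on demand below. GFP_ATOMIC is required
 * because the caller (rockchip_iommu_map()) holds priv->pgtablelock with
 * interrupts disabled while this runs.
 */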
static unsigned long *rockchip_alloc_lv2entry(unsigned long *sent,
					      unsigned long iova, short *pgcounter)
{
	if (rockchip_lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return NULL;

		*sent = rockchip_mk_lv1ent_page(__pa(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		rockchip_pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		rockchip_pgtable_flush(sent, sent + 1);
	}

	return rockchip_page_entry(sent, iova);
}
static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
				   unsigned long iova, size_t size)
{
	struct rk_iommu_domain *priv = domain->priv;
	unsigned long flags;
	unsigned long *ent;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = rockchip_section_entry(priv->pgtable, iova);
	if (unlikely(rockchip_lv1ent_fault(ent))) {
		if (size > SPAGE_SIZE)
			size = SPAGE_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */
	ent = rockchip_page_entry(ent, iova);
	if (unlikely(rockchip_lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	*ent = 0;
	size = SPAGE_SIZE;
	priv->lv2entcnt[rockchip_lv1ent_offset(iova)] += 1;
	rockchip_pgtable_flush(ent, ent + 1);
done:
	pr_info("%s: unmapped iova 0x%lx/0x%zx bytes\n",
		__func__, iova, size);
	spin_unlock_irqrestore(&priv->pgtablelock, flags);
	return size;
}
static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long *pent;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = rockchip_section_entry(priv->pgtable, iova);

	pent = rockchip_alloc_lv2entry(entry, iova,
				       &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);
	if (pent)
		ret = rockchip_lv2set_page(pent, paddr, size,
					   &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);

	if (ret)
		pr_info("%s: Failed to map iova 0x%lx/0x%zx bytes\n", __func__,
			iova, size);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}
static void rockchip_iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct rk_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct iommu_drvdata, node) == data) {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (rockchip_iommu_disable(data)) {
		dev_dbg(dev->archdata.iommu, "%s: Detached IOMMU with pgtable %#lx\n",
			__func__, __pa(priv->pgtable));
		list_del_init(&data->node);
	} else {
		dev_err(dev->archdata.iommu, "%s: Detaching IOMMU with pgtable %#lx delayed\n",
			__func__, __pa(priv->pgtable));
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
static int rockchip_iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct rk_iommu_domain *priv = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = rockchip_iommu_enable(data, __pa(priv->pgtable));

	if (ret == 0) {
		/* 'data->node' must not already appear in priv->clients */
		BUG_ON(!list_empty(&data->node));
		list_add_tail(&data->node, &priv->clients);
		data->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev->archdata.iommu, "%s: Failed to attach IOMMU with pgtable %#lx\n",
			__func__, __pa(priv->pgtable));
	} else if (ret > 0) {
		dev_dbg(dev->archdata.iommu, "%s: IOMMU with pgtable 0x%lx already attached\n",
			__func__, __pa(priv->pgtable));
	} else {
		dev_dbg(dev->archdata.iommu, "%s: Attached new IOMMU with pgtable 0x%lx\n",
			__func__, __pa(priv->pgtable));
	}

	return ret;
}
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct rk_iommu_domain *priv = domain->priv;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (rockchip_lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
					__va(rockchip_lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 0);
	free_pages((unsigned long)priv->lv2entcnt, 0);
	kfree(priv);
	domain->priv = NULL;
}
static int rockchip_iommu_domain_init(struct iommu_domain *domain)
{
	struct rk_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * The rk32xx IOMMU uses a two-level page table; level 1 and level 2
	 * each hold 1024 entries, and each entry occupies 4 bytes, so one
	 * page is allocated for each table.
	 */
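	/*
	 * Size check (assuming a 32-bit ARM build, sizeof(long) == 4):
	 * 1024 level-1 entries * 4 bytes = 4096 bytes, exactly one 4 KiB
	 * page; the 1024 short counters (2 KiB) also fit in a single page.
	 */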
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL |
							  __GFP_ZERO, 0);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |
						    __GFP_ZERO, 0);
	if (!priv->lv2entcnt)
		goto err_counter;

	rockchip_pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 0);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}
static struct iommu_ops rk_iommu_ops = {
	.domain_init = rockchip_iommu_domain_init,
	.domain_destroy = rockchip_iommu_domain_destroy,
	.attach_dev = rockchip_iommu_attach_device,
	.detach_dev = rockchip_iommu_detach_device,
	.map = rockchip_iommu_map,
	.unmap = rockchip_iommu_unmap,
	.iova_to_phys = rockchip_iommu_iova_to_phys,
	.pgsize_bitmap = SPAGE_SIZE,
};
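/*
 * Usage sketch (illustrative only; device and addresses are hypothetical):
 * once these ops are registered on the platform bus, a client driver can
 * map memory through the generic IOMMU API. Because pgsize_bitmap is
 * SPAGE_SIZE, only 4 KiB mappings are accepted.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, 0x10000000, page_to_phys(page),
 *			  SPAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 */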
static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
					   unsigned int type)
{
	int num = 0;
	int i;

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (type == resource_type(r))
			num++;
	}

	return num;
}
static int rockchip_iommu_probe(struct platform_device *pdev)
{
	int i, ret;
	struct device *dev = &pdev->dev;
	struct iommu_drvdata *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_dbg(dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dev_set_drvdata(dev, data);

	if (pdev->dev.of_node)
		of_property_read_string(pdev->dev.of_node, "dbgname",
					&data->dbgname);
	else
		dev_dbg(dev, "dbgname not assigned in device tree, or device node does not exist\n");

	dev_info(dev, "(%s) Enter\n", data->dbgname);

	data->num_res_mem = rockchip_get_iommu_resource_num(pdev,
				IORESOURCE_MEM);
	if (0 == data->num_res_mem) {
		dev_err(dev, "can't find iommu memory resource\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "data->num_res_mem=%d\n", data->num_res_mem);

	data->num_res_irq = rockchip_get_iommu_resource_num(pdev,
				IORESOURCE_IRQ);
	if (0 == data->num_res_irq) {
		dev_err(dev, "can't find iommu irq resource\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "data->num_res_irq=%d\n", data->num_res_irq);

	data->res_bases = devm_kmalloc_array(dev, data->num_res_mem,
					     sizeof(*data->res_bases), GFP_KERNEL);
	if (data->res_bases == NULL) {
		dev_err(dev, "Not enough memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < data->num_res_mem; i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_err(dev, "Unable to find IOMEM region\n");
			return -ENOENT;
		}

		data->res_bases[i] = devm_ioremap(dev, res->start,
						  resource_size(res));
		if (!data->res_bases[i]) {
			dev_err(dev, "Unable to map IOMEM @ PA:%#x\n",
				res->start);
			return -ENOMEM;
		}

		dev_dbg(dev, "res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
			res->start, i, (unsigned int)data->res_bases[i]);

		if (strstr(data->dbgname, "vop") && cpu_is_rk312x()) {
			rk312x_vop_mmu_base = data->res_bases[0];
			dev_dbg(dev, "rk312x_vop_mmu_base = 0x%08x\n",
				(unsigned int)rk312x_vop_mmu_base);
		}
	}

	for (i = 0; i < data->num_res_irq; i++) {
		if (cpu_is_rk312x() && strstr(data->dbgname, "vop")) {
			dev_info(dev, "skip request vop mmu irq\n");
			continue;
		}

		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			dev_err(dev, "Unable to find IRQ resource\n");
			return -ENOENT;
		}

		ret = devm_request_irq(dev, ret, rockchip_iommu_irq,
				       IRQF_SHARED, dev_name(dev), data);
		if (ret) {
			dev_err(dev, "Unable to register interrupt handler\n");
			return ret;
		}
	}

	ret = rockchip_init_iovmm(dev, &data->vmm);
	if (ret)
		return ret;

	data->iommu = dev;
	spin_lock_init(&data->data_lock);
	INIT_LIST_HEAD(&data->node);

	dev_info(dev, "(%s) Initialized\n", data->dbgname);
	return 0;
}
static const struct of_device_id iommu_dt_ids[] = {
	{ .compatible = IEP_IOMMU_COMPATIBLE_NAME },
	{ .compatible = VIP_IOMMU_COMPATIBLE_NAME },
	{ .compatible = VOPB_IOMMU_COMPATIBLE_NAME },
	{ .compatible = VOPL_IOMMU_COMPATIBLE_NAME },
	{ .compatible = HEVC_IOMMU_COMPATIBLE_NAME },
	{ .compatible = VPU_IOMMU_COMPATIBLE_NAME },
	{ .compatible = ISP_IOMMU_COMPATIBLE_NAME },
	{ .compatible = VOP_IOMMU_COMPATIBLE_NAME },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, iommu_dt_ids);
static struct platform_driver rk_iommu_driver = {
	.probe = rockchip_iommu_probe,
	.driver = {
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(iommu_dt_ids),
	},
};
static int __init rockchip_iommu_init_driver(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
						LV2TABLE_SIZE, LV2TABLE_SIZE,
						0, NULL);
	if (!lv2table_kmem_cache) {
		pr_info("%s: failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	return platform_driver_register(&rk_iommu_driver);
}

core_initcall(rockchip_iommu_init_driver);