 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.

#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <linux/rockchip-iovmm.h>
#include <linux/rockchip/grf.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/iomap.h>
#include <linux/device.h>
#include "rockchip-iommu.h"
/* We do not consider super section mapping (16 MB) */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

static void __iomem *rk312x_vop_mmu_base;
enum iommu_entry_flags {
	IOMMU_FLAGS_PRESENT = 0x01,
	IOMMU_FLAGS_READ_PERMISSION = 0x02,
	IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
	IOMMU_FLAGS_OVERRIDE_CACHE = 0x8,
	IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
	IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
	IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
	IOMMU_FLAGS_READ_CACHEABLE = 0x80,
	IOMMU_FLAGS_READ_ALLOCATE = 0x100,
	IOMMU_FLAGS_MASK = 0x1FF,
};
#define rockchip_lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
#define rockchip_lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
#define rockchip_lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
#define rockchip_spage_phys(pent) (*(pent) & SPAGE_MASK)
#define rockchip_spage_offs(iova) ((iova) & 0x0FFF)

#define rockchip_lv1ent_offset(iova) (((iova) >> 22) & 0x03FF)
#define rockchip_lv2ent_offset(iova) (((iova) >> 12) & 0x03FF)

#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define rockchip_lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define rockchip_mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
/* level-2 page entries get read and write permission by default */
#define rockchip_mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
				      IOMMU_FLAGS_READ_PERMISSION | \
				      IOMMU_FLAGS_WRITE_PERMISSION)
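
/*
 * Illustrative note (not in the original source): with the macros above, a
 * 32-bit IOVA splits into a 10-bit level-1 (DTE) index, a 10-bit level-2
 * (PTE) index and a 12-bit page offset. For example, for iova = 0x12345678:
 *
 *	rockchip_lv1ent_offset(0x12345678) == 0x048	(bits 31:22)
 *	rockchip_lv2ent_offset(0x12345678) == 0x345	(bits 21:12)
 *	rockchip_spage_offs(0x12345678)    == 0x678	(bits 11:0)
 */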
#define IOMMU_REG_POLL_COUNT_FAST 1000

/*
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
	/**< Current Page Directory Pointer */
	IOMMU_REGISTER_DTE_ADDR = 0x0000,
	/**< Status of the MMU */
	IOMMU_REGISTER_STATUS = 0x0004,
	/**< Command register, used to control the MMU */
	IOMMU_REGISTER_COMMAND = 0x0008,
	/**< Logical address of the last page fault */
	IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
	/**< Used to invalidate the mapping of a single page from the MMU */
	IOMMU_REGISTER_ZAP_ONE_LINE = 0x010,
	/**< Raw interrupt status, all interrupts visible */
	IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
	/**< Indicate to the MMU that the interrupt has been received */
	IOMMU_REGISTER_INT_CLEAR = 0x0018,
	/**< Enable/disable types of interrupts */
	IOMMU_REGISTER_INT_MASK = 0x001C,
	/**< Interrupt status based on the mask */
	IOMMU_REGISTER_INT_STATUS = 0x0020,
	IOMMU_REGISTER_AUTO_GATING = 0x0024
	/**< Enable paging (memory translation) */
	IOMMU_COMMAND_ENABLE_PAGING = 0x00,
	/**< Disable paging (memory translation) */
	IOMMU_COMMAND_DISABLE_PAGING = 0x01,
	/**< Enable stall on page fault */
	IOMMU_COMMAND_ENABLE_STALL = 0x02,
	/**< Disable stall on page fault */
	IOMMU_COMMAND_DISABLE_STALL = 0x03,
	/**< Zap the entire page table cache */
	IOMMU_COMMAND_ZAP_CACHE = 0x04,
	/**< Page fault processed */
	IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
	/**< Reset the MMU back to power-on settings */
	IOMMU_COMMAND_HARD_RESET = 0x06
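
/*
 * Illustrative note (not in the original source): rockchip_iommu_enable()
 * below issues these commands in roughly this order when bringing an MMU up:
 * ENABLE_STALL, an optional HARD_RESET (skipped for the ISP MMU), write the
 * page directory base to IOMMU_REGISTER_DTE_ADDR, ZAP_CACHE, unmask the page
 * fault and bus error interrupts, ENABLE_PAGING and finally DISABLE_STALL.
 */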
/*
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
enum iommu_interrupt {
	IOMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
	IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
};
enum iommu_status_bits {
	IOMMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
	IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
	IOMMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
	IOMMU_STATUS_BIT_IDLE = 1 << 3,
	IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
	IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
	IOMMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
};

/*
 * Size of an MMU page in bytes
 */
#define IOMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space referenced by a page table page
 */
#define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
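
/*
 * Illustrative note (not in the original source): one second-level table has
 * NUM_LV2ENTRIES (1024) entries, each mapping one IOMMU_PAGE_SIZE (4 KiB)
 * page, so a single level-1 entry covers 1024 * 4 KiB = 4 MiB of IOVA space,
 * which is what IOMMU_VIRTUAL_PAGE_SIZE above expresses.
 */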
/*
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define IOMMU_PDE_ENTRY(address) (((address) >> 22) & 0x03FF)

/*
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define IOMMU_PTE_ENTRY(address) (((address) >> 12) & 0x03FF)

/*
 * Extract the memory address from a PDE/PTE entry
 */
#define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))
static struct kmem_cache *lv2table_kmem_cache;

static unsigned long *rockchip_section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + rockchip_lv1ent_offset(iova);
}

static unsigned long *rockchip_page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)__va(rockchip_lv2table_base(sent)) +
		rockchip_lv2ent_offset(iova);
}
struct rk_iommu_domain {
	struct list_head clients;	/* list of iommu_drvdata.node */
	unsigned long *pgtable;		/* lv1 page table, 4KB */
	short *lv2entcnt;		/* free lv2 entry counter for each section */
	spinlock_t lock;		/* lock for this structure */
	spinlock_t pgtablelock;		/* lock for modifying page table @ pgtable */
};
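
/*
 * Illustrative note (not in the original source): pgtable holds the 1024
 * level-1 entries that the hardware walks from IOMMU_REGISTER_DTE_ADDR,
 * while lv2entcnt is purely software bookkeeping: one free-entry counter per
 * level-1 slot, set to NUM_LV2ENTRIES when a level-2 table is allocated and
 * incremented again as pages are unmapped.
 */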
static bool rockchip_set_iommu_active(struct iommu_drvdata *data)
{
	/* return true if the IOMMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool rockchip_set_iommu_inactive(struct iommu_drvdata *data)
{
	/* return true if the IOMMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool rockchip_is_iommu_active(struct iommu_drvdata *data)
{
	return data->activations > 0;
}
static void rockchip_iommu_disable_stall(void __iomem *base)
	if (base != rk312x_vop_mmu_base) {
		mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
		goto skip_vop_mmu_disable;

	if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) {

	if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		pr_info("Aborting MMU disable stall request since it is in pagefault state.\n");

	if (!(mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE)) {

skip_vop_mmu_disable:
	__raw_writel(IOMMU_COMMAND_DISABLE_STALL, base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			status = __raw_readl(base + IOMMU_REGISTER_STATUS);

		if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))

		if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)

		if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Disable stall request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
static bool rockchip_iommu_enable_stall(void __iomem *base)
	if (base != rk312x_vop_mmu_base) {
		mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
		goto skip_vop_mmu_enable;

	if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) {

	if (mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) {
		pr_info("MMU stall already enabled\n");

	if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		pr_info("Aborting MMU stall request since it is in pagefault state. mmu status is 0x%08x\n",

	__raw_writel(IOMMU_COMMAND_ENABLE_STALL, base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);

		if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)

		if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
		    (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))

		if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Enable stall request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));

	if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		pr_info("Aborting MMU stall request since it has a pagefault.\n");
static bool rockchip_iommu_enable_paging(void __iomem *base)
	__raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
		     base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
			    IOMMU_STATUS_BIT_PAGING_ENABLED)

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Enable paging request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
static bool rockchip_iommu_disable_paging(void __iomem *base)
	__raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
		     base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
			      IOMMU_STATUS_BIT_PAGING_ENABLED))

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("Disable paging request failed, MMU status is 0x%08X\n",
			__raw_readl(base + IOMMU_REGISTER_STATUS));
static void rockchip_iommu_page_fault_done(void __iomem *base, const char *dbgname)
	pr_info("MMU: %s: Leaving page fault mode\n",
	__raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
		     base + IOMMU_REGISTER_COMMAND);
static int rockchip_iommu_zap_tlb_without_stall(void __iomem *base)
	__raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);

static int rockchip_iommu_zap_tlb(void __iomem *base)
	if (!rockchip_iommu_enable_stall(base)) {
		pr_err("%s failed\n", __func__);

	__raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);

	rockchip_iommu_disable_stall(base);
static inline bool rockchip_iommu_raw_reset(void __iomem *base)
	unsigned int grf_value;

	__raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);

	if (base != rk312x_vop_mmu_base) {
		ret = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
		if (!(0xCAFEB000 == ret)) {
			grf_value = readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
			pr_info("error when %s. grf = 0x%08x\n", __func__, grf_value);

	__raw_writel(IOMMU_COMMAND_HARD_RESET,
		     base + IOMMU_REGISTER_COMMAND);

	for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
		if (base != rk312x_vop_mmu_base) {
			if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)

	if (IOMMU_REG_POLL_COUNT_FAST == i) {
		pr_info("%s,Reset request failed, MMU status is 0x%08X\n",
			__func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));
static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned long pgd)
	__raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);

static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname)
	ret = rockchip_iommu_raw_reset(base);
		pr_info("(%s), %s failed\n", dbgname, __func__);

	if (base != rk312x_vop_mmu_base)
		__raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
			     IOMMU_INTERRUPT_READ_BUS_ERROR,
			     base + IOMMU_REGISTER_INT_MASK);
		__raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);
static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}
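
/*
 * Illustrative note (not in the original source): the IOMMU walks its page
 * tables directly from physical memory, so after the CPU modifies an entry
 * the data cache is cleaned (dmac_flush_range) and the outer (L2) cache is
 * flushed for the same range; otherwise the hardware could keep translating
 * from stale table contents.
 */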
static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
	u32 dte_index, pte_index, page_offset;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	phys_addr_t page_addr_phys = 0;

	dte_index = rockchip_lv1ent_offset(fault_address);
	pte_index = rockchip_lv2ent_offset(fault_address);
	page_offset = (u32)(fault_address & 0x00000fff);

	mmu_dte_addr = addr_dte;
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);

	if (!(IOMMU_FLAGS_PRESENT & dte))

	pte_addr_phys = ((phys_addr_t)dte & 0xfffff000) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);

	if (!(IOMMU_FLAGS_PRESENT & pte))

	page_addr_phys = ((phys_addr_t)pte & 0xfffff000) + page_offset;
	page_flags = pte & 0x000001fe;

	pr_err("iova = %pad: dte_index: 0x%03x pte_index: 0x%03x page_offset: 0x%03x\n",
	       &fault_address, dte_index, pte_index, page_offset);
	pr_err("mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
	       &mmu_dte_addr_phys, &dte_addr_phys, dte,
	       (dte & IOMMU_FLAGS_PRESENT), &pte_addr_phys, pte,
	       (pte & IOMMU_FLAGS_PRESENT), &page_addr_phys, page_flags);
static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is blocked while the fault interrupt is handled. */
	struct iommu_drvdata *data = dev_id;
	dma_addr_t fault_address;

	spin_lock_irqsave(&data->data_lock, flags);

	if (!rockchip_is_iommu_active(data)) {
		spin_unlock_irqrestore(&data->data_lock, flags);

	for (i = 0; i < data->num_res_mem; i++) {
		status = __raw_readl(data->res_bases[i] +
				     IOMMU_REGISTER_INT_STATUS);

		rawstat = __raw_readl(data->res_bases[i] +
				      IOMMU_REGISTER_INT_RAWSTAT);

		reg_status = __raw_readl(data->res_bases[i] +
					 IOMMU_REGISTER_STATUS);

		dev_info(data->iommu, "1.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
			 rawstat, status, reg_status);

		if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
			fault_address = __raw_readl(data->res_bases[i] +
						    IOMMU_REGISTER_PAGE_FAULT_ADDR);

			dte = __raw_readl(data->res_bases[i] +
					  IOMMU_REGISTER_DTE_ADDR);

			flags = (status & 32) ? 1 : 0;

			dev_err(data->iommu, "Page fault detected at %pad from bus id %d of type %s on %s\n",
				&fault_address, (status >> 6) & 0x1F,
				(flags == 1) ? "write" : "read", data->dbgname);

			dump_pagetbl(fault_address, dte);

				report_iommu_fault(data->domain, data->iommu,
						   fault_address, flags);

			rockchip_iommu_page_fault_done(data->res_bases[i],

		if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
			dev_err(data->iommu, "bus error occurred at %pad\n",
		if (rawstat & ~(IOMMU_INTERRUPT_READ_BUS_ERROR |
				IOMMU_INTERRUPT_PAGE_FAULT)) {
			dev_err(data->iommu, "unexpected int_status: %#08x\n\n",

		__raw_writel(rawstat, data->res_bases[i] +
			     IOMMU_REGISTER_INT_CLEAR);

		status = __raw_readl(data->res_bases[i] +
				     IOMMU_REGISTER_INT_STATUS);

		rawstat = __raw_readl(data->res_bases[i] +
				      IOMMU_REGISTER_INT_RAWSTAT);

		reg_status = __raw_readl(data->res_bases[i] +
					 IOMMU_REGISTER_STATUS);

		dev_info(data->iommu, "2.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n",
			 rawstat, status, reg_status);

		ret = rockchip_iommu_zap_tlb_without_stall(data->res_bases[i]);
			dev_err(data->iommu, "(%s) %s failed\n", data->dbgname,

	spin_unlock_irqrestore(&data->data_lock, flags);
static bool rockchip_iommu_disable(struct iommu_drvdata *data)
	spin_lock_irqsave(&data->data_lock, flags);

	if (!rockchip_set_iommu_inactive(data)) {
		spin_unlock_irqrestore(&data->data_lock, flags);
		dev_info(data->iommu, "(%s) %d times left to be disabled\n",
			 data->dbgname, data->activations);

	for (i = 0; i < data->num_res_mem; i++) {
		ret = rockchip_iommu_enable_stall(data->res_bases[i]);
			dev_info(data->iommu, "(%s), %s failed\n",
				 data->dbgname, __func__);
			spin_unlock_irqrestore(&data->data_lock, flags);

		__raw_writel(0, data->res_bases[i] + IOMMU_REGISTER_INT_MASK);

		ret = rockchip_iommu_disable_paging(data->res_bases[i]);
			rockchip_iommu_disable_stall(data->res_bases[i]);
			spin_unlock_irqrestore(&data->data_lock, flags);
			dev_info(data->iommu, "%s error\n", __func__);

		rockchip_iommu_disable_stall(data->res_bases[i]);

	spin_unlock_irqrestore(&data->data_lock, flags);

	dev_info(data->iommu, "(%s) Disabled\n", data->dbgname);
/* __rk_sysmmu_enable: Enables the System MMU
 *
 * Returns -error if an error occurred and the System MMU is not enabled,
 * 0 if the System MMU has just been enabled, and 1 if the System MMU was
 * already enabled.
 */
static int rockchip_iommu_enable(struct iommu_drvdata *data, unsigned long pgtable)
	spin_lock_irqsave(&data->data_lock, flags);

	if (!rockchip_set_iommu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			rockchip_set_iommu_inactive(data);

		spin_unlock_irqrestore(&data->data_lock, flags);
		dev_info(data->iommu, "(%s) Already enabled\n", data->dbgname);

	for (i = 0; i < data->num_res_mem; i++) {
		ret = rockchip_iommu_enable_stall(data->res_bases[i]);
			dev_info(data->iommu, "(%s), %s failed\n",
				 data->dbgname, __func__);
			spin_unlock_irqrestore(&data->data_lock, flags);

		if (!strstr(data->dbgname, "isp")) {
			if (!rockchip_iommu_reset(data->res_bases[i],
				spin_unlock_irqrestore(&data->data_lock, flags);

		rockchip_iommu_set_ptbase(data->res_bases[i], pgtable);

		__raw_writel(IOMMU_COMMAND_ZAP_CACHE, data->res_bases[i] +
			     IOMMU_REGISTER_COMMAND);

		__raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
			     IOMMU_INTERRUPT_READ_BUS_ERROR,
			     data->res_bases[i] + IOMMU_REGISTER_INT_MASK);

		ret = rockchip_iommu_enable_paging(data->res_bases[i]);
			spin_unlock_irqrestore(&data->data_lock, flags);
			dev_info(data->iommu, "(%s), %s failed\n",
				 data->dbgname, __func__);

		rockchip_iommu_disable_stall(data->res_bases[i]);

	data->pgtable = pgtable;

	dev_info(data->iommu, "(%s) Enabled\n", data->dbgname);

	spin_unlock_irqrestore(&data->data_lock, flags);
int rockchip_iommu_tlb_invalidate(struct device *dev)
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	spin_lock_irqsave(&data->data_lock, flags);

	if (rockchip_is_iommu_active(data)) {
		for (i = 0; i < data->num_res_mem; i++) {
			ret = rockchip_iommu_zap_tlb_without_stall(data->res_bases[i]);
				dev_err(dev->archdata.iommu, "(%s) %s failed\n",
					data->dbgname, __func__);
				spin_unlock_irqrestore(&data->data_lock, flags);

		dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",

	spin_unlock_irqrestore(&data->data_lock, flags);
static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
	struct rk_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = rockchip_section_entry(priv->pgtable, iova);
	entry = rockchip_page_entry(entry, iova);
	phys = rockchip_spage_phys(entry) + rockchip_spage_offs(iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);
static int rockchip_lv2set_page(unsigned long *pent, phys_addr_t paddr,
				size_t size, short *pgcnt)
	if (!rockchip_lv2ent_fault(pent))

	*pent = rockchip_mk_lv2ent_spage(paddr);
	rockchip_pgtable_flush(pent, pent + 1);

static unsigned long *rockchip_alloc_lv2entry(unsigned long *sent,
					      unsigned long iova, short *pgcounter)
	if (rockchip_lv1ent_fault(sent)) {
		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

		*sent = rockchip_mk_lv1ent_page(__pa(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		rockchip_pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		rockchip_pgtable_flush(sent, sent + 1);

	return rockchip_page_entry(sent, iova);
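
/*
 * Illustrative note (not in the original source): a newly allocated level-2
 * table starts out zeroed (all entries fault), its free-entry counter is set
 * to NUM_LV2ENTRIES, and both the new table and the level-1 entry pointing at
 * it are flushed with rockchip_pgtable_flush() so the hardware table walker
 * sees them immediately.
 */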
static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
				   unsigned long iova, size_t size)
	struct rk_iommu_domain *priv = domain->priv;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = rockchip_section_entry(priv->pgtable, iova);

	if (unlikely(rockchip_lv1ent_fault(ent))) {
		if (size > SPAGE_SIZE)

	/* lv1ent_page(sent) == true here */

	ent = rockchip_page_entry(ent, iova);

	if (unlikely(rockchip_lv2ent_fault(ent))) {

	priv->lv2entcnt[rockchip_lv1ent_offset(iova)] += 1;

	pr_info("%s: unmap iova 0x%lx/0x%x bytes\n",
		__func__, iova, size);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);
static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size, int prot)
	struct rk_iommu_domain *priv = domain->priv;
	unsigned long *entry;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = rockchip_section_entry(priv->pgtable, iova);

	pent = rockchip_alloc_lv2entry(entry, iova,
				       &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);

	ret = rockchip_lv2set_page(pent, paddr, size,
				   &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);

		pr_info("%s: Failed to map iova 0x%lx/0x%x bytes\n", __func__,

	spin_unlock_irqrestore(&priv->pgtablelock, flags);
static void rockchip_iommu_detach_device(struct iommu_domain *domain, struct device *dev)
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct rk_iommu_domain *priv = domain->priv;
	struct list_head *pos;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct iommu_drvdata, node) == data) {

		spin_unlock_irqrestore(&priv->lock, flags);

	if (rockchip_iommu_disable(data)) {
		dev_dbg(dev->archdata.iommu, "%s: Detached IOMMU with pgtable %#lx\n",
			__func__, __pa(priv->pgtable));
		list_del_init(&data->node);

		dev_err(dev->archdata.iommu, "%s: Detaching IOMMU with pgtable %#lx delayed",
			__func__, __pa(priv->pgtable));

	spin_unlock_irqrestore(&priv->lock, flags);
static int rockchip_iommu_attach_device(struct iommu_domain *domain, struct device *dev)
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct rk_iommu_domain *priv = domain->priv;

	spin_lock_irqsave(&priv->lock, flags);

	ret = rockchip_iommu_enable(data, __pa(priv->pgtable));

		/* 'data->node' must not already be in priv->clients */
		BUG_ON(!list_empty(&data->node));
		list_add_tail(&data->node, &priv->clients);
		data->domain = domain;

	spin_unlock_irqrestore(&priv->lock, flags);

		dev_err(dev->archdata.iommu, "%s: Failed to attach IOMMU with pgtable %#lx\n",
			__func__, __pa(priv->pgtable));
	} else if (ret > 0) {
		dev_dbg(dev->archdata.iommu, "%s: IOMMU with pgtable 0x%lx already attached\n",
			__func__, __pa(priv->pgtable));
		dev_dbg(dev->archdata.iommu, "%s: Attached new IOMMU with pgtable 0x%lx\n",
			__func__, __pa(priv->pgtable));
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
	struct rk_iommu_domain *priv = domain->priv;

	WARN_ON(!list_empty(&priv->clients));

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (rockchip_lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
					__va(rockchip_lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 0);
	free_pages((unsigned long)priv->lv2entcnt, 0);
static int rockchip_iommu_domain_init(struct iommu_domain *domain)
	struct rk_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	/*
	 * The rk32xx IOMMU uses a two-level page table. Level 1 and level 2
	 * each have 1024 entries and each entry occupies 4 bytes, so allocate
	 * one page for each table.
	 */
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL |

	priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |
	if (!priv->lv2entcnt)

	rockchip_pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->priv = priv;

	free_pages((unsigned long)priv->pgtable, 0);
static struct iommu_ops rk_iommu_ops = {
	.domain_init = &rockchip_iommu_domain_init,
	.domain_destroy = &rockchip_iommu_domain_destroy,
	.attach_dev = &rockchip_iommu_attach_device,
	.detach_dev = &rockchip_iommu_detach_device,
	.map = &rockchip_iommu_map,
	.unmap = &rockchip_iommu_unmap,
	.iova_to_phys = &rockchip_iommu_iova_to_phys,
	.pgsize_bitmap = SPAGE_SIZE,
};
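
/*
 * Illustrative sketch (not part of this driver): once these ops are
 * registered with bus_set_iommu() below, a client driver would normally reach
 * them through the generic IOMMU API rather than call them directly, roughly
 * along these lines (error handling omitted, "dev" being the client's
 * platform device; pgsize_bitmap == SPAGE_SIZE means mappings are built from
 * 4 KiB pages):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, SPAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SPAGE_SIZE);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */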
static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (type == resource_type(r))
static int rockchip_iommu_probe(struct platform_device *pdev)
	struct iommu_drvdata *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
		dev_dbg(dev, "Not enough memory\n");

	dev_set_drvdata(dev, data);

	if (pdev->dev.of_node)
		of_property_read_string(pdev->dev.of_node, "dbgname",
		dev_dbg(dev, "dbgname not assigned in device tree or device node does not exist\n");

	dev_info(dev, "(%s) Enter\n", data->dbgname);

	data->num_res_mem = rockchip_get_iommu_resource_num(pdev,
	if (0 == data->num_res_mem) {
		dev_err(dev, "can't find iommu memory resource\n");

	dev_dbg(dev, "data->num_res_mem=%d\n", data->num_res_mem);

	data->num_res_irq = rockchip_get_iommu_resource_num(pdev,
	if (0 == data->num_res_irq) {
		dev_err(dev, "can't find iommu irq resource\n");

	dev_dbg(dev, "data->num_res_irq=%d\n", data->num_res_irq);

	data->res_bases = devm_kmalloc_array(dev, data->num_res_mem,
					     sizeof(*data->res_bases), GFP_KERNEL);
	if (data->res_bases == NULL) {
		dev_err(dev, "Not enough memory\n");

	for (i = 0; i < data->num_res_mem; i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
			dev_err(dev, "Unable to find IOMEM region\n");

		data->res_bases[i] = devm_ioremap(dev, res->start,
						  resource_size(res));
		if (!data->res_bases[i]) {
			dev_err(dev, "Unable to map IOMEM @ PA:%#x\n",

		dev_dbg(dev, "res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
			res->start, i, (unsigned int)data->res_bases[i]);

		if (strstr(data->dbgname, "vop") && cpu_is_rk312x()) {
			rk312x_vop_mmu_base = data->res_bases[0];
			dev_dbg(dev, "rk312x_vop_mmu_base = 0x%08x\n",
				(unsigned int)rk312x_vop_mmu_base);

	for (i = 0; i < data->num_res_irq; i++) {
		if (cpu_is_rk312x() && strstr(data->dbgname, "vop")) {
			dev_info(dev, "skip request vop mmu irq\n");

		ret = platform_get_irq(pdev, i);
			dev_err(dev, "Unable to find IRQ resource\n");

		ret = devm_request_irq(dev, ret, rockchip_iommu_irq,
				       IRQF_SHARED, dev_name(dev), data);
			dev_err(dev, "Unable to register interrupt handler\n");

	ret = rockchip_init_iovmm(dev, &data->vmm);

	spin_lock_init(&data->data_lock);
	INIT_LIST_HEAD(&data->node);

	dev_info(dev, "(%s) Initialized\n", data->dbgname);
static const struct of_device_id iommu_dt_ids[] = {
	{ .compatible = IEP_IOMMU_COMPATIBLE_NAME},
	{ .compatible = VIP_IOMMU_COMPATIBLE_NAME},
	{ .compatible = VOPB_IOMMU_COMPATIBLE_NAME},
	{ .compatible = VOPL_IOMMU_COMPATIBLE_NAME},
	{ .compatible = HEVC_IOMMU_COMPATIBLE_NAME},
	{ .compatible = VPU_IOMMU_COMPATIBLE_NAME},
	{ .compatible = ISP_IOMMU_COMPATIBLE_NAME},
	{ .compatible = VOP_IOMMU_COMPATIBLE_NAME},

MODULE_DEVICE_TABLE(of, iommu_dt_ids);
static struct platform_driver rk_iommu_driver = {
	.probe = rockchip_iommu_probe,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(iommu_dt_ids),
static int __init rockchip_iommu_init_driver(void)
	lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
						LV2TABLE_SIZE, LV2TABLE_SIZE,
	if (!lv2table_kmem_cache) {
		pr_info("%s: failed to create kmem cache\n", __func__);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	return platform_driver_register(&rk_iommu_driver);

core_initcall(rockchip_iommu_init_driver);
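
/*
 * Illustrative note (not in the original source): core_initcall() runs this
 * registration early in boot, before the usual device/module initcalls, so
 * the IOMMU ops are already installed on the platform bus by the time client
 * drivers (VOP, VPU, HEVC, ISP, ...) probe and attach their devices.
 */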