/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <linux/rockchip-iovmm.h>

#include "rockchip-iommu.h"
/* We do not consider super section (16 MB) mappings */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

enum iommu_entry_flags {
        IOMMU_FLAGS_PRESENT = 0x01,
        IOMMU_FLAGS_READ_PERMISSION = 0x02,
        IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
        IOMMU_FLAGS_OVERRIDE_CACHE = 0x08,
        IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
        IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
        IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
        IOMMU_FLAGS_READ_CACHEABLE = 0x80,
        IOMMU_FLAGS_READ_ALLOCATE = 0x100,
        IOMMU_FLAGS_MASK = 0x1FF,
};

#define lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
#define lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
#define lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0x0FFF)

#define lv1ent_offset(iova) (((iova) >> 22) & 0x03FF)
#define lv2ent_offset(iova) (((iova) >> 12) & 0x03FF)
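
/*
 * Worked example (illustrative, derived from the macros above): for an
 * iova of 0x12345678, lv1ent_offset() takes bits [31:22] and yields 0x048,
 * lv2ent_offset() takes bits [21:12] and yields 0x345, and spage_offs()
 * keeps bits [11:0], i.e. 0x678.  The level-1 entry selects a level-2
 * table, the level-2 entry selects a 4 KiB page, and the offset addresses
 * a byte inside that page.
 */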
#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
/* level-2 page entries are created with read and write permission by default */
#define mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
                             IOMMU_FLAGS_READ_PERMISSION | \
                             IOMMU_FLAGS_WRITE_PERMISSION)
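
/*
 * Example (for illustration only): mk_lv2ent_spage(0x80000000) produces
 * 0x80000007, i.e. the page at physical address 0x80000000 marked present
 * with read and write permission (0x1 | 0x2 | 0x4).
 */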
#define IOMMU_REG_POLL_COUNT_FAST 1000

/* rk3036: the VPU and HEVC blocks share a single AHB interface */
#define BIT_VCODEC_SEL (1 << 3)
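
/*
 * Note: the GRF_SOC_CON registers on these SoCs treat the upper 16 bits as
 * a write-enable mask, which is why rockchip_vcodec_select() below always
 * writes BIT_VCODEC_SEL << 16 together with the bit value itself.
 */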
/*
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
        /**< Current Page Directory Pointer */
        IOMMU_REGISTER_DTE_ADDR = 0x0000,
        /**< Status of the MMU */
        IOMMU_REGISTER_STATUS = 0x0004,
        /**< Command register, used to control the MMU */
        IOMMU_REGISTER_COMMAND = 0x0008,
        /**< Logical address of the last page fault */
        IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
        /**< Used to invalidate the mapping of a single page from the MMU */
        IOMMU_REGISTER_ZAP_ONE_LINE = 0x0010,
        /**< Raw interrupt status, all interrupts visible */
        IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
        /**< Indicate to the MMU that the interrupt has been received */
        IOMMU_REGISTER_INT_CLEAR = 0x0018,
        /**< Enable/disable types of interrupts */
        IOMMU_REGISTER_INT_MASK = 0x001C,
        /**< Interrupt status based on the mask */
        IOMMU_REGISTER_INT_STATUS = 0x0020,
        IOMMU_REGISTER_AUTO_GATING = 0x0024

        /**< Enable paging (memory translation) */
        IOMMU_COMMAND_ENABLE_PAGING = 0x00,
        /**< Disable paging (memory translation) */
        IOMMU_COMMAND_DISABLE_PAGING = 0x01,
        /**< Enable stall on page fault */
        IOMMU_COMMAND_ENABLE_STALL = 0x02,
        /**< Disable stall on page fault */
        IOMMU_COMMAND_DISABLE_STALL = 0x03,
        /**< Zap the entire page table cache */
        IOMMU_COMMAND_ZAP_CACHE = 0x04,
        /**< Page fault processed */
        IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
        /**< Reset the MMU back to power-on settings */
        IOMMU_COMMAND_HARD_RESET = 0x06

/*
 * MMU interrupt register bits
 * Each cause of the interrupt is reported through the (raw) interrupt
 * status registers. Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
enum iommu_interrupt {
        IOMMU_INTERRUPT_PAGE_FAULT = 0x01,      /**< A page fault occurred */
        IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02   /**< A bus read error occurred */
};

enum iommu_status_bits {
        IOMMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
        IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
        IOMMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
        IOMMU_STATUS_BIT_IDLE = 1 << 3,
        IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        IOMMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
};
/*
 * Size of an MMU page in bytes
 */
#define IOMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space referenced by a page table page:
 * 1024 PTEs x 4 KiB pages = 4 MiB
 */
#define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/*
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define IOMMU_PDE_ENTRY(address) (((address) >> 22) & 0x03FF)

/*
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define IOMMU_PTE_ENTRY(address) (((address) >> 12) & 0x03FF)

/*
 * Extract the memory address from a PDE/PTE entry
 */
#define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))
static struct kmem_cache *lv2table_kmem_cache;

static void rockchip_vcodec_select(const char *string)
        if (strstr(string, "hevc"))
                writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) |
                               (BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16),
                               RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
        else if (strstr(string, "vpu"))
                writel_relaxed((readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) &
                                (~BIT_VCODEC_SEL)) | (BIT_VCODEC_SEL << 16),
                               RK_GRF_VIRT + RK3036_GRF_SOC_CON1);

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
        return pgtable + lv1ent_offset(iova);

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
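
/*
 * A translation therefore walks two levels, e.g. (sketch):
 *
 *      unsigned long *sent = section_entry(pgtable, iova);
 *      unsigned long *pent = page_entry(sent, iova);
 *      phys_addr_t pa = spage_phys(pent) + spage_offs(iova);
 *
 * which is exactly what rockchip_iommu_iova_to_phys() does further down.
 */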
static char *iommu_fault_name[IOMMU_FAULTS_NUM] = {

struct rk_iommu_domain {
        struct list_head clients;  /* list of iommu_drvdata.node */
        unsigned long *pgtable;    /* lv1 page table, 4KB */
        short *lv2entcnt;          /* free lv2 entry counter for each section */
        spinlock_t lock;           /* lock for this structure */
        spinlock_t pgtablelock;    /* lock for modifying page table @ pgtable */
};

static bool set_iommu_active(struct iommu_drvdata *data)
        /*
         * return true if the IOMMU was not active previously
         * and it needs to be initialized
         */
        return ++data->activations == 1;

static bool set_iommu_inactive(struct iommu_drvdata *data)
        /* return true if the IOMMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;

static bool is_iommu_active(struct iommu_drvdata *data)
        return data->activations > 0;
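
/*
 * The three helpers above implement a simple enable refcount: only the
 * first set_iommu_active() call (0 -> 1) makes the caller program the
 * hardware, and only the matching last set_iommu_inactive() call (1 -> 0)
 * turns paging off again; nested enable/disable pairs just adjust the
 * counter.
 */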
static void iommu_disable_stall(void __iomem *base)
        u32 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);

        if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))

        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");

        __raw_writel(IOMMU_COMMAND_DISABLE_STALL,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                u32 status = __raw_readl(base + IOMMU_REGISTER_STATUS);

                if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))

                if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)

                if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))

        if (IOMMU_REG_POLL_COUNT_FAST == i)
                pr_err("Disable stall request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));

static bool iommu_enable_stall(void __iomem *base)
        u32 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);

        if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))

        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU stall request since it is in pagefault state.\n");

        __raw_writel(IOMMU_COMMAND_ENABLE_STALL,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
                if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)

                if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
                    (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))

                if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Enable stall request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));

        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU stall request since it has a pagefault.\n");
static bool iommu_enable_paging(void __iomem *base)
        __raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
                    IOMMU_STATUS_BIT_PAGING_ENABLED)

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Enable paging request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));

static bool iommu_disable_paging(void __iomem *base)
        __raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
                      IOMMU_STATUS_BIT_PAGING_ENABLED))

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Disable paging request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));

static void iommu_page_fault_done(void __iomem *base, const char *dbgname)
        pr_info("MMU: %s: Leaving page fault mode\n",
        __raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
                     base + IOMMU_REGISTER_COMMAND);

static bool iommu_zap_tlb(void __iomem *base)
        bool stall_success = iommu_enable_stall(base);

        __raw_writel(IOMMU_COMMAND_ZAP_CACHE,
                     base + IOMMU_REGISTER_COMMAND);

        iommu_disable_stall(base);

static inline bool iommu_raw_reset(void __iomem *base)
        /* write a known pattern; only the page-aligned bits should stick */
        __raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);

        if (!(0xCAFEB000 == __raw_readl(base + IOMMU_REGISTER_DTE_ADDR))) {
                pr_err("error when %s.\n", __func__);

        __raw_writel(IOMMU_COMMAND_HARD_RESET,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("%s: Reset request failed, MMU status is 0x%08X\n",
                       __func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));
static void __iommu_set_ptbase(void __iomem *base, unsigned long pgd)
        __raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);

static bool iommu_reset(void __iomem *base, const char *dbgname)
        err = iommu_enable_stall(base);

        pr_err("%s: stall failed: %s\n", __func__, dbgname);

        err = iommu_raw_reset(base);

        __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                     IOMMU_INTERRUPT_READ_BUS_ERROR,
                     base + IOMMU_REGISTER_INT_MASK);
        iommu_disable_stall(base);

        pr_err("%s: failed: %s\n", __func__, dbgname);
static inline void pgtable_flush(void *vastart, void *vaend)
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
static void set_fault_handler(struct iommu_drvdata *data,
                              rockchip_iommu_fault_handler_t handler)
        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);

static int default_fault_handler(struct device *dev,
                                 enum rk_iommu_inttype itype,
                                 unsigned long pgtable_base,
                                 unsigned long fault_addr,
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        pr_err("%s: iommu device not assigned yet\n", __func__);

        if ((itype >= IOMMU_FAULTS_NUM) || (itype < IOMMU_PAGEFAULT))
                itype = IOMMU_FAULT_UNKNOWN;

        if (itype == IOMMU_BUSERROR)
                pr_err("%s occurred at 0x%lx (page table base: 0x%lx)\n",
                       iommu_fault_name[itype], fault_addr, pgtable_base);

        if (itype == IOMMU_PAGEFAULT)
                pr_err("IOMMU: Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
                       (status >> 6) & 0x1F,
                       (status & 32) ? "write" : "read",

        pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

static void dump_pagetbl(u32 fault_address, u32 addr_dte)
        u32 *lv1_entry_value;

        u32 *lv2_entry_value;

        lv1_offset = lv1ent_offset(fault_address);
        lv2_offset = lv2ent_offset(fault_address);

        lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
        lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
        lv1_entry_value = (u32 *)(*lv1_entry_va);

        lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe);
        lv2_entry_pa = (u32 *)lv2_base + lv2_offset;
        lv2_entry_va = (u32 *)(__va(lv2_base)) + lv2_offset;
        lv2_entry_value = (u32 *)(*lv2_entry_va);

        pr_info("fault address = 0x%08x, dte addr pa = 0x%08x, va = 0x%08x\n",
                fault_address, addr_dte, (u32)__va(addr_dte));
        pr_info("lv1_offset = 0x%x, lv1_entry_pa = 0x%08x, lv1_entry_va = 0x%08x\n",
                lv1_offset, (u32)lv1_entry_pa, (u32)lv1_entry_va);
        pr_info("lv1_entry_value(*lv1_entry_va) = 0x%08x, lv2_base = 0x%08x\n",
                (u32)lv1_entry_value, (u32)lv2_base);
        pr_info("lv2_offset = 0x%x, lv2_entry_pa = 0x%08x, lv2_entry_va = 0x%08x\n",
                lv2_offset, (u32)lv2_entry_pa, (u32)lv2_entry_va);
        pr_info("lv2_entry_value(*lv2_entry_va) = 0x%08x\n",
                (u32)lv2_entry_value);
static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
        /* the IOMMU is blocked (stalled) when this interrupt occurs */
        struct iommu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum rk_iommu_inttype itype = IOMMU_FAULT_UNKNOWN;

        read_lock(&data->lock);

        if (!is_iommu_active(data)) {
                read_unlock(&data->lock);

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        pdev = to_platform_device(data->iommu);

        for (i = 0; i < data->num_res_irq; i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))

        if (i == data->num_res_irq) {
                itype = IOMMU_FAULT_UNKNOWN;

        int_status = __raw_readl(data->res_bases[i] +
                                 IOMMU_REGISTER_INT_STATUS);

        if (int_status != 0) {

                __raw_writel(0x00, data->res_bases[i] +
                             IOMMU_REGISTER_INT_MASK);

                rawstat = __raw_readl(data->res_bases[i] +
                                      IOMMU_REGISTER_INT_RAWSTAT);

                if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
                        fault_address = __raw_readl(data->res_bases[i] +
                                        IOMMU_REGISTER_PAGE_FAULT_ADDR);
                        itype = IOMMU_PAGEFAULT;
                } else if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
                        itype = IOMMU_BUSERROR;

                dump_pagetbl(fault_address,
                             __raw_readl(data->res_bases[i] +
                                         IOMMU_REGISTER_DTE_ADDR));

                if (data->fault_handler) {
                        unsigned long base = __raw_readl(data->res_bases[i] +
                                                         IOMMU_REGISTER_DTE_ADDR);
                        status = __raw_readl(data->res_bases[i] +
                                             IOMMU_REGISTER_STATUS);
                        ret = data->fault_handler(data->dev, itype, base,
                                                  fault_address, status);

                if (!ret && (itype != IOMMU_FAULT_UNKNOWN)) {
                        if (IOMMU_PAGEFAULT == itype) {
                                iommu_zap_tlb(data->res_bases[i]);
                                iommu_page_fault_done(data->res_bases[i],
                                __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                                             IOMMU_INTERRUPT_READ_BUS_ERROR,
                                             IOMMU_REGISTER_INT_MASK);

                        pr_err("(%s) %s is not handled.\n",
                               data->dbgname, iommu_fault_name[itype]);

        read_unlock(&data->lock);
static bool __rockchip_iommu_disable(struct iommu_drvdata *data)
        bool disabled = false;

        write_lock_irqsave(&data->lock, flags);

        if (!set_iommu_inactive(data))

        for (i = 0; i < data->num_res_mem; i++)
                iommu_disable_paging(data->res_bases[i]);

        write_unlock_irqrestore(&data->lock, flags);

        pr_info("(%s) Disabled\n", data->dbgname);

        pr_info("(%s) %d times left to be disabled\n",
                data->dbgname, data->activations);

/*
 * __rockchip_iommu_enable: enables the IOMMU.
 *
 * Returns a negative error code if an error occurred and the IOMMU was not
 * enabled, 0 if the IOMMU has just been enabled, and 1 if it was already
 * enabled.
 */
static int __rockchip_iommu_enable(struct iommu_drvdata *data,
                                   unsigned long pgtable,
                                   struct iommu_domain *domain)
        write_lock_irqsave(&data->lock, flags);

        if (!set_iommu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        set_iommu_inactive(data);

                pr_info("(%s) Already enabled\n", data->dbgname);

        data->pgtable = pgtable;

        for (i = 0; i < data->num_res_mem; i++) {

                status = iommu_enable_stall(data->res_bases[i]);

                __iommu_set_ptbase(data->res_bases[i], pgtable);
                __raw_writel(IOMMU_COMMAND_ZAP_CACHE,
                             IOMMU_REGISTER_COMMAND);

                __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                             IOMMU_INTERRUPT_READ_BUS_ERROR,
                             data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
                iommu_enable_paging(data->res_bases[i]);
                iommu_disable_stall(data->res_bases[i]);

        data->domain = domain;

        pr_info("(%s) Enabled\n", data->dbgname);

        write_unlock_irqrestore(&data->lock, flags);

bool rockchip_iommu_disable(struct device *dev)
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        disabled = __rockchip_iommu_disable(data);
void rockchip_iommu_tlb_invalidate(struct device *dev)
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        if (is_iommu_active(data)) {

                for (i = 0; i < data->num_res_mem; i++) {
                        if (!iommu_zap_tlb(data->res_bases[i]))
                                pr_err("%s: invalidating TLB failed\n",

                pr_info("(%s) Disabled. Skipping TLB invalidation.\n",

        read_unlock_irqrestore(&data->lock, flags);

static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);
        entry = page_entry(entry, iova);
        phys = spage_phys(entry) + spage_offs(iova);

        spin_unlock_irqrestore(&priv->pgtablelock, flags);
static int lv2set_page(unsigned long *pent, phys_addr_t paddr,
                       size_t size, short *pgcnt)
        if (!lv2ent_fault(pent))

        *pent = mk_lv2ent_spage(paddr);
        pgtable_flush(pent, pent + 1);

static unsigned long *alloc_lv2entry(unsigned long *sent,
                                     unsigned long iova, short *pgcounter)
        if (lv1ent_fault(sent)) {

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

                *sent = mk_lv1ent_page(__pa(pent));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);

        return page_entry(sent, iova);
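
/*
 * Level-2 tables are therefore allocated lazily: the first map into a
 * 4 MiB section allocates the table (GFP_ATOMIC, since this runs under the
 * page-table spinlock) and initializes its free-entry counter to
 * NUM_LV2ENTRIES.
 */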
static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
                                   unsigned long iova, size_t size)
        struct rk_iommu_domain *priv = domain->priv;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SPAGE_SIZE)

        /* lv1ent_page(sent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {

        priv->lv2entcnt[lv1ent_offset(iova)] += 1;

        /*
         * pr_info("%s: unmap iova 0x%lx/0x%x bytes\n",
         *         __func__, iova, size);
         */
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t size, int prot)
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long *entry;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        pent = alloc_lv2entry(entry, iova,
                              &priv->lv2entcnt[lv1ent_offset(iova)]);

        ret = lv2set_page(pent, paddr, size,
                          &priv->lv2entcnt[lv1ent_offset(iova)]);

        pr_err("%s: Failed to map iova 0x%lx/0x%x bytes\n", __func__,

        spin_unlock_irqrestore(&priv->pgtablelock, flags);
static void rockchip_iommu_detach_device(struct iommu_domain *domain,
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = domain->priv;
        struct list_head *pos;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients)
                if (list_entry(pos, struct iommu_drvdata, node) == data) {

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        if (__rockchip_iommu_disable(data)) {
                pr_info("%s: Detached IOMMU with pgtable %#lx\n",
                        __func__, __pa(priv->pgtable));
                list_del(&data->node);
                INIT_LIST_HEAD(&data->node);

                pr_info("%s: Detaching IOMMU with pgtable %#lx delayed\n",
                        __func__, __pa(priv->pgtable));

        spin_unlock_irqrestore(&priv->lock, flags);

static int rockchip_iommu_attach_device(struct iommu_domain *domain,
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = domain->priv;

        spin_lock_irqsave(&priv->lock, flags);

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        ret = __rockchip_iommu_enable(data, __pa(priv->pgtable), domain);

        /* 'data->node' must not already be on the priv->clients list */
        BUG_ON(!list_empty(&data->node));

        list_add_tail(&data->node, &priv->clients);

        spin_unlock_irqrestore(&priv->lock, flags);

                pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",
                       __func__, __pa(priv->pgtable));
        } else if (ret > 0) {
                pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",
                        __func__, __pa(priv->pgtable));

                pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",
                        __func__, __pa(priv->pgtable));
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
        struct rk_iommu_domain *priv = domain->priv;
        struct iommu_drvdata *data;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                if (cpu_is_312x() || cpu_is_3036())
                        rockchip_vcodec_select(data->dbgname);
                while (!rockchip_iommu_disable(data->dev))
                        ; /* until the IOMMU is actually disabled */

        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
                                        __va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);
static int rockchip_iommu_domain_init(struct iommu_domain *domain)
        struct rk_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        /*
         * The rk32xx IOMMU uses a two-level page table. Level 1 and level 2
         * each hold 1024 entries of 4 bytes, so one page (1024 * 4 = 4096
         * bytes) is allocated for each table.
         */
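        /*
         * Sizing sketch (derived from the constants above): each table is
         * 1024 entries * 4 bytes = 4 KiB, one level-2 table spans
         * 1024 * 4 KiB = 4 MiB of iova space, and a full level-1 table
         * therefore covers 1024 * 4 MiB = 4 GiB.  The lv2entcnt array is
         * 1024 16-bit counters (2 KiB), so it too fits within one 4 KiB page.
         */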
        priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL |

        priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |

        if (!priv->lv2entcnt)

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        free_pages((unsigned long)priv->pgtable, 0);
static struct iommu_ops rk_iommu_ops = {
        .domain_init = &rockchip_iommu_domain_init,
        .domain_destroy = &rockchip_iommu_domain_destroy,
        .attach_dev = &rockchip_iommu_attach_device,
        .detach_dev = &rockchip_iommu_detach_device,
        .map = &rockchip_iommu_map,
        .unmap = &rockchip_iommu_unmap,
        .iova_to_phys = &rockchip_iommu_iova_to_phys,
        .pgsize_bitmap = SPAGE_SIZE,
};
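
/*
 * Illustrative use from a master device driver, through the generic IOMMU
 * API of this kernel generation (sketch only; error handling omitted, the
 * iova and buffer address are made up):
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *      iommu_attach_device(domain, dev);
 *      iommu_map(domain, 0x10000000, buf_phys, SPAGE_SIZE, 0);
 *      ...
 *      iommu_unmap(domain, 0x10000000, SPAGE_SIZE);
 *      iommu_detach_device(domain, dev);
 *      iommu_domain_free(domain);
 */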
static int rockchip_iommu_prepare(void)
        lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",

        if (!lv2table_kmem_cache) {
                pr_err("%s: failed to create kmem cache\n", __func__);

        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

        pr_err("%s: failed to set iommu to bus\n", __func__);

static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
        struct resource *info = NULL;
        int num_resources = 0;

        /* get resource info */

        info = platform_get_resource(pdev, type, num_resources);

        return num_resources;
static struct kobject *dump_mmu_object;

static int dump_mmu_pagetbl(struct device *dev, struct device_attribute *attr,
                            const char *buf, u32 count)
        ret = kstrtouint(buf, 0, &mmu_base);

        pr_info("%s is not in hexadecimal form.\n", buf);
        base = ioremap(mmu_base, 0x100);
        iommu_dte = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
        fault_address = __raw_readl(base + IOMMU_REGISTER_PAGE_FAULT_ADDR);
        dump_pagetbl(fault_address, iommu_dte);

static DEVICE_ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl);

void dump_iommu_sysfs_init(void)
        dump_mmu_object = kobject_create_and_add("rk_iommu", NULL);
        if (dump_mmu_object == NULL)

        ret = sysfs_create_file(dump_mmu_object,
                                &dev_attr_dump_mmu_pgtable.attr);
static int rockchip_iommu_probe(struct platform_device *pdev)
        struct iommu_drvdata *data;

        ret = rockchip_iommu_prepare();

        pr_err("%s: failed\n", __func__);

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);

        dev_dbg(dev, "Not enough memory\n");

        ret = dev_set_drvdata(dev, data);

        dev_dbg(dev, "Unable to initialize driver data\n");

        if (pdev->dev.of_node) {
                of_property_read_string(pdev->dev.of_node,
                                        "dbgname", &(data->dbgname));

                pr_info("dbgname not assigned in device tree or device node does not exist\n");

        pr_info("(%s) Enter\n", data->dbgname);

        data->num_res_mem = rockchip_get_iommu_resource_num(pdev,

        if (0 == data->num_res_mem) {
                pr_err("can't find iommu memory resource\n");

        pr_info("data->num_res_mem=%d\n", data->num_res_mem);

        data->num_res_irq = rockchip_get_iommu_resource_num(pdev,

        if (0 == data->num_res_irq) {
                pr_err("can't find iommu irq resource\n");

        data->res_bases = kmalloc_array(data->num_res_mem,
                                        sizeof(*data->res_bases), GFP_KERNEL);
        if (data->res_bases == NULL) {
                dev_dbg(dev, "Not enough memory\n");

        for (i = 0; i < data->num_res_mem; i++) {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, i);

                pr_err("Unable to find IOMEM region\n");

                data->res_bases[i] = ioremap(res->start, resource_size(res));
                pr_info("res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
                        res->start, i, (unsigned int)data->res_bases[i]);
                if (!data->res_bases[i]) {
                        pr_err("Unable to map IOMEM @ PA:%#x\n", res->start);

                if (cpu_is_312x() || cpu_is_3036())
                        rockchip_vcodec_select(data->dbgname);

                if (!strstr(data->dbgname, "isp")) {
                        if (!iommu_reset(data->res_bases[i], data->dbgname)) {

        for (i = 0; i < data->num_res_irq; i++) {
                ret = platform_get_irq(pdev, i);

                pr_err("Unable to find IRQ resource\n");

                ret = request_irq(ret, rockchip_iommu_irq,
                                  IRQF_SHARED, dev_name(dev), data);

                pr_err("Unable to register interrupt handler\n");

        ret = rockchip_init_iovmm(dev, &data->vmm);

        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        set_fault_handler(data, &default_fault_handler);

        pr_info("(%s) Initialized\n", data->dbgname);

        irq = platform_get_irq(pdev, i);
        free_irq(irq, data);

        while (data->num_res_mem-- > 0)
                iounmap(data->res_bases[data->num_res_mem]);
        kfree(data->res_bases);

        dev_err(dev, "Failed to initialize\n");
static const struct of_device_id iommu_dt_ids[] = {
        { .compatible = IEP_IOMMU_COMPATIBLE_NAME },
        { .compatible = VIP_IOMMU_COMPATIBLE_NAME },
        { .compatible = VOPB_IOMMU_COMPATIBLE_NAME },
        { .compatible = VOPL_IOMMU_COMPATIBLE_NAME },
        { .compatible = HEVC_IOMMU_COMPATIBLE_NAME },
        { .compatible = VPU_IOMMU_COMPATIBLE_NAME },
        { .compatible = ISP_IOMMU_COMPATIBLE_NAME },
        { .compatible = VOP_IOMMU_COMPATIBLE_NAME },

MODULE_DEVICE_TABLE(of, iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
        .probe = rockchip_iommu_probe,
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(iommu_dt_ids),

static int __init rockchip_iommu_init_driver(void)
        dump_iommu_sysfs_init();

        return platform_driver_register(&rk_iommu_driver);

core_initcall(rockchip_iommu_init_driver);