X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fiommu%2Frockchip-iommu.c;h=c4c2c54a6127992b12c4ff0a269cebeb2076736a;hb=cce453c22d47914ae858ce70f577b9f4e8f2d3ee;hp=a34e459f560d71be090a6ecc03d9f1625fddb0dc;hpb=534c1ca9c257a81dd1b456f9244c1a1bfa0f7af7;p=firefly-linux-kernel-4.4.55.git diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index a34e459f560d..c4c2c54a6127 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -4,11 +4,11 @@ * published by the Free Software Foundation. */ -#include -#include +#include #include #include #include +#include #include #include #include @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -77,9 +78,11 @@ struct rk_iommu_domain { struct list_head iommus; + struct platform_device *pdev; u32 *dt; /* page directory table */ - spinlock_t iommus_lock; /* lock for iommus list */ - spinlock_t dt_lock; /* lock for modifying page directory table */ + dma_addr_t dt_dma; + struct mutex iommus_lock; /* lock for iommus list */ + struct mutex dt_lock; /* lock for modifying page directory table */ struct iommu_domain domain; }; @@ -89,22 +92,19 @@ struct rk_iommu { void __iomem **bases; int num_mmu; int irq; + bool reset_disabled; /* isp iommu reset operation would failed */ struct list_head node; /* entry in rk_iommu_domain.iommus */ struct iommu_domain *domain; /* domain to which iommu is attached */ + struct clk *aclk; /* aclock belong to master */ + struct clk *hclk; /* hclock belong to master */ }; -static inline void rk_table_flush(u32 *va, unsigned int count) +static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma, + unsigned int count) { -#if defined(CONFIG_ARM) - phys_addr_t pa_start = virt_to_phys(va); - phys_addr_t pa_end = virt_to_phys(va + count); - size_t size = pa_end - pa_start; - - __cpuc_flush_dcache_area(va, size); - outer_flush_range(pa_start, pa_end); -#elif defined(CONFIG_ARM64) - __dma_flush_range(va, va + count); -#endif + size_t size = count * sizeof(u32); /* count of u32 entry */ + + dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE); } static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom) @@ -187,10 +187,9 @@ static inline bool rk_dte_is_pt_valid(u32 dte) return dte & RK_DTE_PT_VALID; } -static u32 rk_mk_dte(u32 *pt) +static inline u32 rk_mk_dte(dma_addr_t pt_dma) { - phys_addr_t pt_phys = virt_to_phys(pt); - return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; + return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; } /* @@ -261,6 +260,26 @@ static u32 rk_mk_pte_invalid(u32 pte) #define RK_IOVA_PAGE_MASK 0x00000fff #define RK_IOVA_PAGE_SHIFT 0 +static void rk_iommu_power_on(struct rk_iommu *iommu) +{ + if (iommu->aclk && iommu->hclk) { + clk_enable(iommu->aclk); + clk_enable(iommu->hclk); + } + + pm_runtime_get_sync(iommu->dev); +} + +static void rk_iommu_power_off(struct rk_iommu *iommu) +{ + pm_runtime_put_sync(iommu->dev); + + if (iommu->aclk && iommu->hclk) { + clk_disable(iommu->aclk); + clk_disable(iommu->hclk); + } +} + static u32 rk_iova_dte_index(dma_addr_t iova) { return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; @@ -298,39 +317,48 @@ static void rk_iommu_base_command(void __iomem *base, u32 command) { writel(command, base + RK_MMU_COMMAND); } -static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, +static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, size_t size) { int i; - - dma_addr_t iova_end = iova + size; + 
dma_addr_t iova_end = iova_start + size; /* * TODO(djkurtz): Figure out when it is more efficient to shootdown the * entire iotlb rather than iterate over individual iovas. */ - for (i = 0; i < iommu->num_mmu; i++) - for (; iova < iova_end; iova += SPAGE_SIZE) + + rk_iommu_power_on(iommu); + + for (i = 0; i < iommu->num_mmu; i++) { + dma_addr_t iova; + + for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE) rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); + } + + rk_iommu_power_off(iommu); } static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) { - u32 active = RK_MMU_STATUS_STALL_ACTIVE; + bool active = true; int i; for (i = 0; i < iommu->num_mmu; i++) - active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS); + active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & + RK_MMU_STATUS_STALL_ACTIVE); return active; } static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) { - u32 enable = RK_MMU_STATUS_PAGING_ENABLED; + bool enable = true; int i; for (i = 0; i < iommu->num_mmu; i++) - enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS); + enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & + RK_MMU_STATUS_PAGING_ENABLED); return enable; } @@ -416,6 +444,10 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu) int ret, i; u32 dte_addr; + /* Workaround for isp mmus */ + if (iommu->reset_disabled) + return 0; + /* * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY * and verifying that upper 5 nybbles are read back. @@ -553,12 +585,11 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; phys_addr_t pt_phys, phys = 0; u32 dte, pte; u32 *page_table; - spin_lock_irqsave(&rk_domain->dt_lock, flags); + mutex_lock(&rk_domain->dt_lock); dte = rk_domain->dt[rk_iova_dte_index(iova)]; if (!rk_dte_is_pt_valid(dte)) @@ -572,7 +603,7 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); out: - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + mutex_unlock(&rk_domain->dt_lock); return phys; } @@ -581,16 +612,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, dma_addr_t iova, size_t size) { struct list_head *pos; - unsigned long flags; /* shootdown these iova from all iommus using this domain */ - spin_lock_irqsave(&rk_domain->iommus_lock, flags); + mutex_lock(&rk_domain->iommus_lock); list_for_each(pos, &rk_domain->iommus) { struct rk_iommu *iommu; iommu = list_entry(pos, struct rk_iommu, node); rk_iommu_zap_lines(iommu, iova, size); } - spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + mutex_unlock(&rk_domain->iommus_lock); } static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, @@ -605,13 +635,16 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, dma_addr_t iova) { + struct device *dev = &rk_domain->pdev->dev; u32 *page_table, *dte_addr; - u32 dte; + u32 dte_index, dte; phys_addr_t pt_phys; + dma_addr_t pt_dma; - assert_spin_locked(&rk_domain->dt_lock); + WARN_ON(!mutex_is_locked(&rk_domain->dt_lock)); - dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; + dte_index = rk_iova_dte_index(iova); + dte_addr = &rk_domain->dt[dte_index]; dte = *dte_addr; if (rk_dte_is_pt_valid(dte)) goto done; @@ -620,24 +653,32 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, if (!page_table) 
return ERR_PTR(-ENOMEM); - dte = rk_mk_dte(page_table); - *dte_addr = dte; + pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pt_dma)) { + dev_err(dev, "DMA mapping error while allocating page table\n"); + free_page((unsigned long)page_table); + return ERR_PTR(-ENOMEM); + } - rk_table_flush(page_table, NUM_PT_ENTRIES); - rk_table_flush(dte_addr, 1); + dte = rk_mk_dte(pt_dma); + *dte_addr = dte; + rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES); + rk_table_flush(rk_domain, + rk_domain->dt_dma + dte_index * sizeof(u32), 1); done: pt_phys = rk_dte_pt_address(dte); return (u32 *)phys_to_virt(pt_phys); } static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, - u32 *pte_addr, dma_addr_t iova, size_t size) + u32 *pte_addr, dma_addr_t pte_dma, + size_t size) { unsigned int pte_count; unsigned int pte_total = size / SPAGE_SIZE; - assert_spin_locked(&rk_domain->dt_lock); + WARN_ON(!mutex_is_locked(&rk_domain->dt_lock)); for (pte_count = 0; pte_count < pte_total; pte_count++) { u32 pte = pte_addr[pte_count]; @@ -647,20 +688,20 @@ static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, pte_addr[pte_count] = rk_mk_pte_invalid(pte); } - rk_table_flush(pte_addr, pte_count); + rk_table_flush(rk_domain, pte_dma, pte_count); return pte_count * SPAGE_SIZE; } static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, - dma_addr_t iova, phys_addr_t paddr, size_t size, - int prot) + dma_addr_t pte_dma, dma_addr_t iova, + phys_addr_t paddr, size_t size, int prot) { unsigned int pte_count; unsigned int pte_total = size / SPAGE_SIZE; phys_addr_t page_phys; - assert_spin_locked(&rk_domain->dt_lock); + WARN_ON(!mutex_is_locked(&rk_domain->dt_lock)); for (pte_count = 0; pte_count < pte_total; pte_count++) { u32 pte = pte_addr[pte_count]; @@ -673,7 +714,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, paddr += SPAGE_SIZE; } - rk_table_flush(pte_addr, pte_count); + rk_table_flush(rk_domain, pte_dma, pte_total); /* * Zap the first and last iova to evict from iotlb any previously @@ -686,7 +727,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, return 0; unwind: /* Unmap the range of iovas that we just mapped */ - rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); + rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, + pte_count * SPAGE_SIZE); iova += pte_count * SPAGE_SIZE; page_phys = rk_pte_page_address(pte_addr[pte_count]); @@ -700,12 +742,12 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, phys_addr_t paddr, size_t size, int prot) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; - dma_addr_t iova = (dma_addr_t)_iova; + dma_addr_t pte_dma, iova = (dma_addr_t)_iova; u32 *page_table, *pte_addr; + u32 dte_index, pte_index; int ret; - spin_lock_irqsave(&rk_domain->dt_lock, flags); + mutex_lock(&rk_domain->dt_lock); /* * pgsize_bitmap specifies iova sizes that fit in one page table @@ -716,13 +758,18 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, */ page_table = rk_dte_get_page_table(rk_domain, iova); if (IS_ERR(page_table)) { - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + mutex_unlock(&rk_domain->dt_lock); return PTR_ERR(page_table); } - pte_addr = &page_table[rk_iova_pte_index(iova)]; - ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + dte_index = 
rk_domain->dt[rk_iova_dte_index(iova)]; + pte_index = rk_iova_pte_index(iova); + pte_addr = &page_table[pte_index]; + pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32); + ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, + paddr, size, prot); + + mutex_unlock(&rk_domain->dt_lock); return ret; } @@ -731,14 +778,13 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, size_t size) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; - dma_addr_t iova = (dma_addr_t)_iova; + dma_addr_t pte_dma, iova = (dma_addr_t)_iova; phys_addr_t pt_phys; u32 dte; u32 *pte_addr; size_t unmap_size; - spin_lock_irqsave(&rk_domain->dt_lock, flags); + mutex_lock(&rk_domain->dt_lock); /* * pgsize_bitmap specifies iova sizes that fit in one page table @@ -750,15 +796,16 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, dte = rk_domain->dt[rk_iova_dte_index(iova)]; /* Just return 0 if iova is unmapped */ if (!rk_dte_is_pt_valid(dte)) { - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + mutex_unlock(&rk_domain->dt_lock); return 0; } pt_phys = rk_dte_pt_address(dte); pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); - unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); + pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); + unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); - spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + mutex_unlock(&rk_domain->dt_lock); /* Shootdown iotlb entries for iova range that was just unmapped */ rk_iommu_zap_iova(rk_domain, iova, unmap_size); @@ -792,9 +839,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, { struct rk_iommu *iommu; struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; int ret, i; - phys_addr_t dte_addr; /* * Allow 'virtual devices' (e.g., drm) to attach to domain. 
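
Note on the page-table hunks above: the patch drops virt_to_phys() plus explicit ARM/ARM64 cache maintenance and instead keeps the directory and page tables coherent through the streaming DMA API. Each table is dma_map_single()'d once with DMA_TO_DEVICE, and every CPU-side update is pushed out with dma_sync_single_for_device() via the new rk_table_flush(). The sketch below shows that pattern in isolation; alloc_iommu_table() and set_table_entry() are made-up helper names for this note, while NUM_PT_ENTRIES and SPAGE_SIZE simply mirror the driver's own constants.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

#define NUM_PT_ENTRIES	1024
#define SPAGE_SIZE	4096	/* one table fits in one 4 KiB page */

/* Allocate a table that the CPU writes and the IOMMU only reads. */
static u32 *alloc_iommu_table(struct device *dma_dev, dma_addr_t *dma)
{
	u32 *table = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);

	if (!table)
		return NULL;

	*dma = dma_map_single(dma_dev, table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, *dma)) {
		free_page((unsigned long)table);
		return NULL;
	}
	return table;
}

/* Update one entry and make it visible to the device. */
static void set_table_entry(struct device *dma_dev, u32 *table,
			    dma_addr_t table_dma, unsigned int idx, u32 val)
{
	table[idx] = val;
	/* flush only the dirty entry out of the CPU caches */
	dma_sync_single_for_device(dma_dev, table_dma + idx * sizeof(u32),
				   sizeof(u32), DMA_TO_DEVICE);
}

Because the sync is explicit and per-range, the tables stay ordinary kernel pages; that is what lets the old __cpuc_flush_dcache_area()/outer_flush_range() and __dma_flush_range() calls disappear.
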
@@ -804,6 +849,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, if (!iommu) return 0; + rk_iommu_power_on(iommu); + ret = rk_iommu_enable_stall(iommu); if (ret) return ret; @@ -819,10 +866,10 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, if (ret) return ret; - dte_addr = virt_to_phys(rk_domain->dt); for (i = 0; i < iommu->num_mmu; i++) { - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); - rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, + rk_domain->dt_dma); + rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); } @@ -830,9 +877,9 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, if (ret) return ret; - spin_lock_irqsave(&rk_domain->iommus_lock, flags); + mutex_lock(&rk_domain->iommus_lock); list_add_tail(&iommu->node, &rk_domain->iommus); - spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + mutex_unlock(&rk_domain->iommus_lock); dev_dbg(dev, "Attached to iommu domain\n"); @@ -846,7 +893,6 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, { struct rk_iommu *iommu; struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; int i; /* Allow 'virtual devices' (eg drm) to detach from domain */ @@ -854,9 +900,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, if (!iommu) return; - spin_lock_irqsave(&rk_domain->iommus_lock, flags); + mutex_lock(&rk_domain->iommus_lock); list_del_init(&iommu->node); - spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + mutex_unlock(&rk_domain->iommus_lock); /* Ignore error while disabling, just keep going */ rk_iommu_enable_stall(iommu); @@ -871,20 +917,38 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, iommu->domain = NULL; + rk_iommu_power_off(iommu); + dev_dbg(dev, "Detached from iommu domain\n"); } static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) { struct rk_iommu_domain *rk_domain; + struct platform_device *pdev; + struct device *iommu_dev; - if (type != IOMMU_DOMAIN_UNMANAGED) + if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) return NULL; - rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); - if (!rk_domain) + /* Register a pdev per domain, so DMA API can base on this *dev + * even some virtual master doesn't have an iommu slave + */ + pdev = platform_device_register_simple("rk_iommu_domain", + PLATFORM_DEVID_AUTO, NULL, 0); + if (IS_ERR(pdev)) return NULL; + rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL); + if (!rk_domain) + goto err_unreg_pdev; + + rk_domain->pdev = pdev; + + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&rk_domain->domain)) + goto err_unreg_pdev; + /* * rk32xx iommus use a 2 level pagetable. * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. 
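
For context on how the callbacks in these hunks are reached from a master driver: iommu_domain_alloc() ends up in rk_iommu_domain_alloc(), iommu_attach_device() in rk_iommu_attach_device() (which now also powers the IOMMU up), and iommu_map()/iommu_unmap() in rk_iommu_map()/rk_iommu_unmap(). The sketch below is only an illustration of such a caller, assuming an unmanaged domain on the platform bus; example_master_setup() and the IOVA and size used here are invented for the example.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

static int example_master_setup(struct device *master,
				phys_addr_t buf_phys, size_t buf_size)
{
	struct iommu_domain *domain;
	int ret;

	/* the core allocates an IOMMU_DOMAIN_UNMANAGED domain here */
	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, master);
	if (ret)
		goto err_free_domain;

	/* iova and size must be at least SPAGE_SIZE (4 KiB) aligned */
	ret = iommu_map(domain, 0x10000000, buf_phys, buf_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto err_detach;

	return 0;

err_detach:
	iommu_detach_device(domain, master);
err_free_domain:
	iommu_domain_free(domain);
	return ret;
}

One side effect of switching dt_lock and iommus_lock from spinlocks to mutexes is that rk_iommu_map(), rk_iommu_unmap() and the attach/detach paths may now sleep, so calls like the iommu_map() above have to come from process context rather than from atomic context.
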
@@ -892,18 +956,36 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) */ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); if (!rk_domain->dt) - goto err_dt; + goto err_put_cookie; + + iommu_dev = &pdev->dev; + rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt, + SPAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) { + dev_err(iommu_dev, "DMA map error for DT\n"); + goto err_free_dt; + } - rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); + rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES); - spin_lock_init(&rk_domain->iommus_lock); - spin_lock_init(&rk_domain->dt_lock); + mutex_init(&rk_domain->iommus_lock); + mutex_init(&rk_domain->dt_lock); INIT_LIST_HEAD(&rk_domain->iommus); + rk_domain->domain.geometry.aperture_start = 0; + rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); + rk_domain->domain.geometry.force_aperture = true; + return &rk_domain->domain; -err_dt: - kfree(rk_domain); +err_free_dt: + free_page((unsigned long)rk_domain->dt); +err_put_cookie: + if (type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(&rk_domain->domain); +err_unreg_pdev: + platform_device_unregister(pdev); + return NULL; } @@ -919,12 +1001,20 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) if (rk_dte_is_pt_valid(dte)) { phys_addr_t pt_phys = rk_dte_pt_address(dte); u32 *page_table = phys_to_virt(pt_phys); + dma_unmap_single(&rk_domain->pdev->dev, pt_phys, + SPAGE_SIZE, DMA_TO_DEVICE); free_page((unsigned long)page_table); } } + dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma, + SPAGE_SIZE, DMA_TO_DEVICE); free_page((unsigned long)rk_domain->dt); - kfree(rk_domain); + + if (domain->type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(&rk_domain->domain); + + platform_device_unregister(rk_domain->pdev); } static bool rk_iommu_is_dev_iommu_master(struct device *dev) @@ -1036,11 +1126,36 @@ static const struct iommu_ops rk_iommu_ops = { .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, }; +static int rk_iommu_domain_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + + /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */ + arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false); + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + return 0; +} + +static struct platform_driver rk_iommu_domain_driver = { + .probe = rk_iommu_domain_probe, + .driver = { + .name = "rk_iommu_domain", + }, +}; + static int rk_iommu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rk_iommu *iommu; struct resource *res; + int num_res = pdev->num_resources; int i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); @@ -1050,16 +1165,19 @@ static int rk_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iommu); iommu->dev = dev; iommu->num_mmu = 0; - iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * iommu->num_mmu, + + iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res, GFP_KERNEL); if (!iommu->bases) return -ENOMEM; - for (i = 0; i < pdev->num_resources; i++) { + for (i = 0; i < num_res; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) continue; iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iommu->bases[i])) + continue; iommu->num_mmu++; } if (iommu->num_mmu == 0) @@ -1071,11 +1189,35 @@ static int rk_iommu_probe(struct 
platform_device *pdev) return -ENXIO; } + iommu->reset_disabled = device_property_read_bool(dev, + "rk_iommu,disable_reset_quirk"); + + iommu->aclk = devm_clk_get(dev, "aclk"); + if (IS_ERR(iommu->aclk)) { + dev_info(dev, "can't get aclk\n"); + iommu->aclk = NULL; + } + + iommu->hclk = devm_clk_get(dev, "hclk"); + if (IS_ERR(iommu->hclk)) { + dev_info(dev, "can't get hclk\n"); + iommu->hclk = NULL; + } + + if (iommu->aclk && iommu->hclk) { + clk_prepare(iommu->aclk); + clk_prepare(iommu->hclk); + } + + pm_runtime_enable(dev); + return 0; } static int rk_iommu_remove(struct platform_device *pdev) { + pm_runtime_disable(&pdev->dev); + return 0; } @@ -1109,11 +1251,19 @@ static int __init rk_iommu_init(void) if (ret) return ret; - return platform_driver_register(&rk_iommu_driver); + ret = platform_driver_register(&rk_iommu_domain_driver); + if (ret) + return ret; + + ret = platform_driver_register(&rk_iommu_driver); + if (ret) + platform_driver_unregister(&rk_iommu_domain_driver); + return ret; } static void __exit rk_iommu_exit(void) { platform_driver_unregister(&rk_iommu_driver); + platform_driver_unregister(&rk_iommu_domain_driver); } subsys_initcall(rk_iommu_init);
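
The probe and runtime-PM changes at the end of the patch follow one pattern: the "aclk" and "hclk" clocks are optional (if devm_clk_get() fails the driver simply runs with NULL clocks), both are prepared once at probe time, and every window of register access is bracketed by clk_enable()/clk_disable() plus a pm_runtime_get_sync()/pm_runtime_put_sync() pair, which is what the new rk_iommu_power_on()/rk_iommu_power_off() helpers do. A condensed, illustrative sketch of that pattern follows; struct example_iommu and the helper names are hypothetical, not taken from the driver.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

struct example_iommu {
	struct device *dev;
	struct clk *aclk;
	struct clk *hclk;
};

/* Called from probe: look up optional clocks and enable runtime PM. */
static void example_iommu_init_power(struct example_iommu *im)
{
	im->aclk = devm_clk_get(im->dev, "aclk");
	if (IS_ERR(im->aclk))
		im->aclk = NULL;	/* clock is optional */

	im->hclk = devm_clk_get(im->dev, "hclk");
	if (IS_ERR(im->hclk))
		im->hclk = NULL;

	if (im->aclk && im->hclk) {
		/* clk_prepare() may sleep, so do it once here, not per access */
		clk_prepare(im->aclk);
		clk_prepare(im->hclk);
	}

	pm_runtime_enable(im->dev);
}

/* Bracket every register-access window with these two helpers. */
static void example_iommu_power_on(struct example_iommu *im)
{
	if (im->aclk && im->hclk) {
		clk_enable(im->aclk);
		clk_enable(im->hclk);
	}
	pm_runtime_get_sync(im->dev);
}

static void example_iommu_power_off(struct example_iommu *im)
{
	pm_runtime_put_sync(im->dev);
	if (im->aclk && im->hclk) {
		clk_disable(im->aclk);
		clk_disable(im->hclk);
	}
}

In the driver, rk_iommu_zap_lines() and the attach/detach paths use the equivalent helpers so that MMU register accesses happen with the master's clocks running and the power domain held.
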