 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
11 #include <linux/kernel.h>
12 #include <linux/hardirq.h>
13 #include <linux/slab.h>
14 #include <linux/scatterlist.h>
15 #include <linux/err.h>
18 #include <linux/of_platform.h>
20 #include "rockchip-iommu.h"
/*
 * Unmapped guard band (two pages) appended after every IOVA allocation so
 * that a device DMA running past the end of its region faults instead of
 * silently hitting the next allocation.
 */
#define IOMMU_REGION_GUARD	(2 << PAGE_SHIFT)
24 static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova)
26 struct rk_vm_region *region;
28 list_for_each_entry(region, &vmm->regions_list, node)
29 if (region->start == iova)
/*
 * rockchip_iovmm_invalidate_tlb - globally invalidate the IOMMU TLB for @dev.
 *
 * Thin wrapper around rockchip_iommu_tlb_invalidate_global(); returns its
 * error code (0 on success).
 */
int rockchip_iovmm_invalidate_tlb(struct device *dev)
{
	return rockchip_iommu_tlb_invalidate_global(dev);
}
42 void rockchip_iovmm_set_fault_handler(struct device *dev,
43 rockchip_iommu_fault_handler_t handler)
45 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
47 data->fault_handler = handler;
50 int rockchip_iovmm_activate(struct device *dev)
52 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
54 return iommu_attach_device(vmm->domain, dev);
57 void rockchip_iovmm_deactivate(struct device *dev)
59 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
61 iommu_detach_device(vmm->domain, dev);
64 dma_addr_t rockchip_iovmm_map(struct device *dev,
65 struct scatterlist *sg, off_t offset, size_t size)
68 dma_addr_t addr, start = 0;
69 size_t mapped_size = 0;
70 struct rk_vm_region *region;
71 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
75 for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
76 offset -= sg_dma_len(sg);
78 start_off = offset_in_page(sg_phys(sg) + offset);
79 size = PAGE_ALIGN(size + start_off);
81 order = __fls(min_t(size_t, size, SZ_1M));
83 region = kmalloc(sizeof(*region), GFP_KERNEL);
89 start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool,
90 size+IOMMU_REGION_GUARD);
96 pr_debug("%s: size = %zx\n", __func__, size);
104 len = sg_dma_len(sg);
106 /* if back to back sg entries are contiguous consolidate them */
107 while (sg_next(sg) && sg_phys(sg) +
108 sg_dma_len(sg) == sg_phys(sg_next(sg))) {
109 len += sg_dma_len(sg_next(sg));
119 if (offset_in_page(phys)) {
120 len += offset_in_page(phys);
121 phys = round_down(phys, PAGE_SIZE);
124 len = PAGE_ALIGN(len);
126 if (len > (size - mapped_size))
127 len = size - mapped_size;
128 pr_debug("addr = %pad, phys = %pa, len = %zx\n", &addr, &phys, len);
129 ret = iommu_map(vmm->domain, addr, phys, len, 0);
135 } while ((sg = sg_next(sg)) && (mapped_size < size));
137 BUG_ON(mapped_size > size);
139 if (mapped_size < size)
142 region->start = start + start_off;
145 INIT_LIST_HEAD(®ion->node);
147 spin_lock(&vmm->lock);
149 list_add(®ion->node, &vmm->regions_list);
151 spin_unlock(&vmm->lock);
153 ret = rockchip_iommu_tlb_invalidate(dev);
155 spin_lock(&vmm->lock);
156 list_del(®ion->node);
157 spin_unlock(&vmm->lock);
160 dev_dbg(dev->archdata.iommu, "IOVMM: Allocated VM region @ %p/%#X bytes.\n",
161 ®ion->start, region->size);
163 return region->start;
166 iommu_unmap(vmm->domain, start, mapped_size);
167 gen_pool_free(vmm->vmm_pool, start, size);
171 dev_err(dev->archdata.iommu, "IOVMM: Failed to allocated VM region for %zx bytes.\n", size);
172 return (dma_addr_t)ret;
175 void rockchip_iovmm_unmap(struct device *dev, dma_addr_t iova)
177 struct rk_vm_region *region;
178 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
179 size_t unmapped_size;
181 /* This function must not be called in IRQ handlers */
184 spin_lock(&vmm->lock);
186 region = find_region(vmm, iova);
187 if (WARN_ON(!region)) {
188 spin_unlock(&vmm->lock);
192 list_del(®ion->node);
194 spin_unlock(&vmm->lock);
196 region->start = round_down(region->start, PAGE_SIZE);
198 unmapped_size = iommu_unmap(vmm->domain,
199 region->start, region->size);
201 rockchip_iommu_tlb_invalidate(dev);
203 gen_pool_free(vmm->vmm_pool, region->start,
204 region->size+IOMMU_REGION_GUARD);
206 WARN_ON(unmapped_size != region->size);
208 dev_dbg(dev->archdata.iommu, "IOVMM: Unmapped %zx bytes from %pad.\n",
209 unmapped_size, ®ion->start);
214 int rockchip_iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
216 struct rk_vm_region *region;
217 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
220 if (WARN_ON((phys + size) >= IOVA_START)) {
221 dev_err(dev->archdata.iommu, "Unable to create one to one mapping for %zx @ %pa\n",
226 region = kmalloc(sizeof(*region), GFP_KERNEL);
230 if (WARN_ON(phys & ~PAGE_MASK))
231 phys = round_down(phys, PAGE_SIZE);
234 ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
240 region->start = (dma_addr_t)phys;
242 INIT_LIST_HEAD(®ion->node);
244 spin_lock(&vmm->lock);
246 list_add(®ion->node, &vmm->regions_list);
248 spin_unlock(&vmm->lock);
250 ret = rockchip_iommu_tlb_invalidate(dev);
257 void rockchip_iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
259 struct rk_vm_region *region;
260 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
261 size_t unmapped_size;
263 /* This function must not be called in IRQ handlers */
266 if (WARN_ON(phys & ~PAGE_MASK))
267 phys = round_down(phys, PAGE_SIZE);
269 spin_lock(&vmm->lock);
271 region = find_region(vmm, (dma_addr_t)phys);
272 if (WARN_ON(!region)) {
273 spin_unlock(&vmm->lock);
277 list_del(®ion->node);
279 spin_unlock(&vmm->lock);
281 unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
282 WARN_ON(unmapped_size != region->size);
283 dev_dbg(dev->archdata.iommu, "IOVMM: Unmapped %zx bytes from %pad.\n",
284 unmapped_size, ®ion->start);
289 int rockchip_init_iovmm(struct device *iommu, struct rk_iovmm *vmm)
293 vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
294 if (!vmm->vmm_pool) {
296 goto err_setup_genalloc;
299 /* (1GB - 4KB) addr space from 0x10000000 */
300 ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
302 goto err_setup_domain;
304 vmm->domain = iommu_domain_alloc(&platform_bus_type);
307 goto err_setup_domain;
310 spin_lock_init(&vmm->lock);
312 INIT_LIST_HEAD(&vmm->regions_list);
314 dev_info(iommu, "IOVMM: Created %#x B IOVMM from %#x.\n",
315 IOVM_SIZE, IOVA_START);
318 gen_pool_destroy(vmm->vmm_pool);
320 dev_err(iommu, "IOVMM: Failed to create IOVMM (%d)\n", ret);