2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
11 #include <linux/kernel.h>
12 #include <linux/hardirq.h>
13 #include <linux/slab.h>
14 #include <linux/scatterlist.h>
15 #include <linux/err.h>
18 #include <linux/of_platform.h>
20 #include "rockchip-iommu.h"
/*
 * find_region() - look up the rk_vm_region whose IOVA start equals @iova.
 *
 * Linear scan of vmm->regions_list. Callers in this file
 * (rockchip_iovmm_unmap / rockchip_iovmm_unmap_oto) take vmm->lock
 * before calling, so the list walk is protected by that spinlock.
 *
 * NOTE(review): chunk is truncated — the match/return path and the
 * "not found" return (presumably NULL) are not visible here.
 */
22 static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova)
24 struct rk_vm_region *region;
26 list_for_each_entry(region, &vmm->regions_list, node)
27 if (region->start == iova)
/*
 * rockchip_iovmm_set_fault_handler() - install a per-device IOMMU fault
 * callback.
 *
 * Fetches the iommu_drvdata hanging off dev->archdata.iommu and stores
 * @handler under the drvdata write lock, so a concurrent fault path
 * taking the read side sees either the old or the new handler, never a
 * torn pointer.
 *
 * NOTE(review): the declaration of 'flags' (unsigned long, for
 * write_lock_irqsave) is on a line not visible in this chunk.
 */
33 void rockchip_iovmm_set_fault_handler(struct device *dev,
34 rockchip_iommu_fault_handler_t handler)
37 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
39 write_lock_irqsave(&data->lock, flags);
40 data->fault_handler = handler;
41 write_unlock_irqrestore(&data->lock, flags);
/*
 * rockchip_iovmm_activate() - attach @dev to its IOVMM's IOMMU domain.
 *
 * Thin wrapper: looks up the device's rk_iovmm and forwards to
 * iommu_attach_device(). Returns 0 on success or a negative errno
 * from the IOMMU core.
 */
44 int rockchip_iovmm_activate(struct device *dev)
46 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
48 return iommu_attach_device(vmm->domain, dev);
/*
 * rockchip_iovmm_deactivate() - detach @dev from its IOVMM's IOMMU
 * domain. Counterpart of rockchip_iovmm_activate(); no return value,
 * as iommu_detach_device() cannot fail.
 */
51 void rockchip_iovmm_deactivate(struct device *dev)
53 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
55 iommu_detach_device(vmm->domain, dev);
/*
 * rockchip_iovmm_map() - map a scatterlist into IO virtual address space.
 *
 * Allocates an IOVA range from the vmm gen_pool, walks @sg mapping each
 * (coalesced) physically-contiguous run with iommu_map(), records the
 * mapping in a new rk_vm_region on vmm->regions_list, invalidates the
 * TLB, and returns the IOVA (start + intra-page offset of the first
 * byte). On failure the partially-built mapping is torn down.
 *
 * NOTE(review): this chunk is heavily truncated — declarations of
 * 'start_off', 'order', 'ret', 'len', 'phys', the allocation-failure
 * checks, the inner do{} opening, and the error labels between L64 and
 * L66 are on lines not visible here.
 */
58 dma_addr_t rockchip_iovmm_map(struct device *dev,
59 struct scatterlist *sg, off_t offset, size_t size)
62 dma_addr_t addr, start = 0;
63 size_t mapped_size = 0;
64 struct rk_vm_region *region;
65 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
/* Skip whole sg entries that lie entirely before @offset. */
69 for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
70 offset -= sg_dma_len(sg);
/* Remember the sub-page offset so the caller gets a byte-exact IOVA. */
72 start_off = offset_in_page(sg_phys(sg) + offset);
73 size = PAGE_ALIGN(size + start_off);
/* Largest power-of-two chunk considered, capped at 1 MiB. */
75 order = __fls(min_t(size_t, size, SZ_1M));
77 region = kmalloc(sizeof(*region), GFP_KERNEL);
/* Carve the IOVA range out of the per-vmm genalloc pool. */
83 start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
97 /* if back to back sg entries are contiguous consolidate them */
98 while (sg_next(sg) && sg_phys(sg) +
99 sg_dma_len(sg) == sg_phys(sg_next(sg))) {
100 len += sg_dma_len(sg_next(sg));
/* Page-align the physical run: fold any sub-page start into len. */
110 if (offset_in_page(phys)) {
111 len += offset_in_page(phys);
112 phys = round_down(phys, PAGE_SIZE);
115 len = PAGE_ALIGN(len);
/* Never map past the requested (aligned) size. */
117 if (len > (size - mapped_size))
118 len = size - mapped_size;
120 ret = iommu_map(vmm->domain, addr, phys, len, 0);
126 } while ((sg = sg_next(sg)) && (mapped_size < size));
128 BUG_ON(mapped_size > size);
130 if (mapped_size < size)
/* Region start includes the sub-page offset computed above. */
133 region->start = start + start_off;
136 INIT_LIST_HEAD(&region->node);
138 spin_lock(&vmm->lock);
140 list_add(&region->node, &vmm->regions_list);
142 spin_unlock(&vmm->lock);
144 rockchip_iommu_tlb_invalidate(dev);
/*
 * NOTE(review): success message logged at pr_err level — looks like it
 * should be pr_info/pr_debug; left as-is since surrounding lines are
 * not visible.
 */
146 pr_err("IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
147 region->start, region->size);
149 return region->start;
/* Error path: undo partial iommu mappings and return the IOVA range. */
152 iommu_unmap(vmm->domain, start, mapped_size);
153 gen_pool_free(vmm->vmm_pool, start, size);
157 pr_err("IOVMM: Failed to allocated VM region for %#x bytes.\n", size);
158 return (dma_addr_t)ret;
/*
 * rockchip_iovmm_unmap() - tear down a mapping created by
 * rockchip_iovmm_map().
 *
 * Looks up the region by its exact IOVA under vmm->lock, unlinks it,
 * then (outside the lock) unmaps the pages, invalidates the TLB and
 * returns the IOVA range to the genalloc pool.
 *
 * NOTE(review): chunk truncated — region->size rounding and the
 * kfree(region) presumably on the missing trailing lines.
 */
161 void rockchip_iovmm_unmap(struct device *dev, dma_addr_t iova)
163 struct rk_vm_region *region;
164 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
165 size_t unmapped_size;
167 /* This function must not be called in IRQ handlers */
170 spin_lock(&vmm->lock);
172 region = find_region(vmm, iova);
173 if (WARN_ON(!region)) {
174 spin_unlock(&vmm->lock);
/* Unlink while still holding the lock; heavy work happens after. */
178 list_del(&region->node);
180 spin_unlock(&vmm->lock);
/* Drop the sub-page start offset added at map time. */
182 region->start = round_down(region->start, PAGE_SIZE);
184 unmapped_size = iommu_unmap(vmm->domain,
185 region->start, region->size);
187 rockchip_iommu_tlb_invalidate(dev);
189 gen_pool_free(vmm->vmm_pool, region->start, region->size);
191 WARN_ON(unmapped_size != region->size);
/* NOTE(review): informational message at pr_err level — likely
 * intended as pr_debug/pr_info. */
193 pr_err("IOVMM: Unmapped %#x bytes from %#x.\n",
194 unmapped_size, region->start);
/*
 * rockchip_iovmm_map_oto() - create a one-to-one (identity) mapping of
 * [@phys, @phys + @size) so IOVA == physical address.
 *
 * Only allowed below IOVA_START, keeping identity mappings disjoint
 * from the genalloc-managed IOVA window. The region is tracked on
 * vmm->regions_list like a normal mapping.
 *
 * NOTE(review): chunk truncated — the 'ret' declaration, the second
 * pr_err argument, allocation-failure check, region->size assignment
 * and final return are on lines not visible here.
 */
199 int rockchip_iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
201 struct rk_vm_region *region;
202 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
/* Identity mappings must not collide with the dynamic IOVA window. */
205 if (WARN_ON((phys + size) >= IOVA_START)) {
206 pr_err("Unable to create one to one mapping for %#x @ %#x\n",
211 region = kmalloc(sizeof(*region), GFP_KERNEL);
/* Tolerate, but warn on, an unaligned physical base. */
215 if (WARN_ON(phys & ~PAGE_MASK))
216 phys = round_down(phys, PAGE_SIZE);
219 ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
225 region->start = (dma_addr_t)phys;
227 INIT_LIST_HEAD(&region->node);
229 spin_lock(&vmm->lock);
231 list_add(&region->node, &vmm->regions_list);
233 spin_unlock(&vmm->lock);
235 rockchip_iommu_tlb_invalidate(dev);
/*
 * rockchip_iovmm_unmap_oto() - remove an identity mapping created by
 * rockchip_iovmm_map_oto().
 *
 * Mirrors rockchip_iovmm_unmap() but looks the region up by physical
 * address (== IOVA for identity mappings) and does not touch the
 * genalloc pool, since one-to-one ranges were never allocated from it.
 *
 * NOTE(review): chunk truncated — kfree(region) presumably on the
 * missing trailing lines.
 */
242 void rockchip_iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
243 struct rk_vm_region *region;
244 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
245 size_t unmapped_size;
246 /* This function must not be called in IRQ handlers */
/* Normalize to the page base actually used at map time. */
249 if (WARN_ON(phys & ~PAGE_MASK))
250 phys = round_down(phys, PAGE_SIZE);
252 spin_lock(&vmm->lock);
254 region = find_region(vmm, (dma_addr_t)phys);
255 if (WARN_ON(!region)) {
256 spin_unlock(&vmm->lock);
260 list_del(&region->node);
262 spin_unlock(&vmm->lock);
264 unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
265 rockchip_iommu_tlb_invalidate(dev);
266 WARN_ON(unmapped_size != region->size);
/* NOTE(review): informational message at pr_err level — likely
 * intended as pr_debug/pr_info. */
267 pr_err("IOVMM: Unmapped %#x bytes from %#x.\n",
268 unmapped_size, region->start);
/*
 * rockchip_init_iovmm() - initialize a rk_iovmm instance.
 *
 * Creates the page-granular genalloc pool backing IOVA allocation,
 * seeds it with the [IOVA_START, IOVA_START + IOVM_SIZE) window,
 * allocates an IOMMU domain on the platform bus, and initializes the
 * region-list bookkeeping. Returns 0 on success; on failure unwinds
 * via the err_setup_* labels and returns 'ret'.
 *
 * NOTE(review): chunk truncated — 'int ret' declaration, the
 * ret-assignments, the gen_pool_add/domain NULL checks, 'return 0',
 * the err_setup_* label lines themselves, and the final 'return ret'
 * are not visible here.
 */
273 int rockchip_init_iovmm(struct device *iommu, struct rk_iovmm *vmm)
/* Pool granularity is one page; -1 = any NUMA node. */
277 vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
278 if (!vmm->vmm_pool) {
280 goto err_setup_genalloc;
283 /* (1GB - 4KB) addr space from 0x10000000 */
284 ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
286 goto err_setup_domain;
288 vmm->domain = iommu_domain_alloc(&platform_bus_type);
291 goto err_setup_domain;
294 spin_lock_init(&vmm->lock);
296 INIT_LIST_HEAD(&vmm->regions_list);
298 pr_info("IOVMM: Created %#x B IOVMM from %#x.\n",
299 IOVM_SIZE, IOVA_START);
/* Error unwinding: destroy the pool created above, report, return. */
302 gen_pool_destroy(vmm->vmm_pool);
304 pr_err("IOVMM: Failed to create IOVMM (%d)\n", ret);