/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

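/*
 * Worked examples (illustrative only, not part of the original comments):
 * max_alignment(0x82000000) == SZ_16M, max_alignment(0x82010000) == SZ_64K,
 * max_alignment(0x82001000) == SZ_4K, and an address that is not even
 * 4KB-aligned yields 0.
 */
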
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		ent_sz = min_t(unsigned, ent_sz, bytes);

		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}

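/*
 * Illustrative example (assuming the usual OMAP iommu page sizes via
 * iopgsz_max()): for a 16MB + 64KB request whose da and pa are both
 * 16MB-aligned, the loop above emits one SZ_16M entry followed by one
 * SZ_64K entry, so sgtable_nents() returns 2 instead of 4112 PAGE_SIZE
 * entries.
 */
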
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
						const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

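/*
 * Illustrative behaviour (worked example, assuming the usual iopgsz_max()
 * page sizes): with an empty obj->mmap list, obj->da_start == 0 and
 * IOVMF_DA_FIXED cleared, a 1MB IOVMF_LINEAR request is aligned to
 * iopgsz_max(SZ_1M) == SZ_1M and therefore lands at da 0x100000, never at
 * address 0.
 */
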
static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);

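/*
 * Usage sketch (illustrative only; 'domain', 'obj' and 'sgt' are assumed to
 * come from the caller, e.g. a client driver that has already attached its
 * omap_iommu to an iommu_domain and built an sg_table of io-pagesize chunks):
 *
 *	u32 da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = omap_iommu_vunmap(domain, obj, da);
 */
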
/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		   size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);

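/*
 * Usage sketch (illustrative only; 'domain' and 'obj' are assumed to come
 * from the caller): allocate 1MB of backing memory, let the allocator pick
 * the device address, and release it again with omap_iommu_vfree():
 *
 *	u32 da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *	if (!IS_ERR_VALUE(da)) {
 *		void *va = omap_da_to_va(obj, da);	/ * mpu-side view * /
 *		...
 *		omap_iommu_vfree(domain, obj, da);
 *	}
 */
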
/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
		      const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");