/*
 * A common IOMMU based DMA-API implementation for ARM and ARM64 architectures.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include <linux/platform_device.h>
#include <linux/amba/bus.h>

#include <asm/dma-mapping.h>
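/*
 * The dma_map_ops below are thin wrappers around the generic iommu-dma layer
 * (iommu_dma_alloc(), iommu_dma_map_page() and friends), with the
 * architecture-specific pieces - cache maintenance, DMA pgprot selection and
 * the atomic pool - kept behind arch_*() hooks so that the same glue code can
 * serve both ARM and ARM64.
 */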
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 struct dma_attrs *attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;
	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = arch_get_dma_pgprot(attrs, PAGE_KERNEL,
						    coherent);

		/* iommu_dma_alloc() takes a per-page cache flush callback */
		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
					flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = arch_alloc_from_atomic_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				arch_free_from_atomic_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, struct dma_attrs *attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (arch_in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
		arch_free_from_atomic_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}
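/*
 * mmap() and get_sgtable() recover the backing pages from the vm_struct set
 * up by dma_common_pages_remap(), so they are only meaningful for remapped
 * (non-atomic) allocations; hence the find_vm_area() lookups and the
 * WARN_ON()s below.
 */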
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      struct dma_attrs *attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = arch_get_dma_pgprot(attrs, vma->vm_page_prot,
						is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}
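/*
 * Streaming DMA cache maintenance: fully coherent devices need nothing here,
 * while for non-coherent ones the IOVA is translated back to a physical
 * address and the affected region is maintained through the
 * arch_dma_{map,unmap}_area() hooks.
 */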
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	arch_dma_unmap_area(phys, size, dir);
}
static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	arch_dma_map_area(phys, size, dir);
}
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}
static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_dma_unmap_area(sg_phys(sg), sg->length, dir);
}
static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_dma_map_area(sg_phys(sg), sg->length, dir);
}
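/*
 * The scatterlist variants do the same per-entry cache maintenance on the
 * original (physical) segments, and leave all of the IOVA work to
 * iommu_dma_map_sg()/iommu_dma_unmap_sg().
 */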
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_direction_to_prot(dir, coherent));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
static struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.dma_supported = iommu_dma_supported,
	.mapping_error = iommu_dma_mapping_error,
};
/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);
/*
 * Temporarily "borrow" a domain feature flag to tell if we had to resort
 * to creating our own domain here, in case we need to clean it up again.
 */
#define __IOMMU_DOMAIN_FAKE_DEFAULT		(1U << 31)
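/*
 * Attach @dev to a DMA domain: reuse the group's existing domain if the IOMMU
 * core already provided one, otherwise allocate our own "fake default"
 * domain, then initialise the IOVA allocator for the given DMA window and
 * finally switch the device over to iommu_dma_ops.
 */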
static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			    u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * Best case: The device is either part of a group which was
	 * already attached to a domain in a previous call, or it's
	 * been put in a default DMA domain by the IOMMU core.
	 */
	if (!domain) {
		/*
		 * Urgh. The IOMMU core isn't going to do default domains
		 * for non-PCI devices anyway, until it has some means of
		 * abstracting the entirely implementation-specific
		 * sideband data/SoC topology/unicorn dust that may or
		 * may not differentiate upstream masters.
		 * So until then, HORRIBLE HACKS!
		 */
		domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
		if (!domain)
			goto out_no_domain;

		domain->ops = ops;
		domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;

		if (iommu_attach_device(domain, dev))
			goto out_put_domain;
	}

	if (iommu_dma_init_domain(domain, dma_base, size))
		goto out_detach;

	arch_set_dma_ops(dev, &iommu_dma_ops);
	return true;

out_detach:
	iommu_detach_device(domain, dev);
out_put_domain:
	if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
		iommu_domain_free(domain);
out_no_domain:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
	return false;
}
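/*
 * Masters whose group doesn't exist yet are parked on the iommu_dma_masters
 * list; the bus notifier below retries do_iommu_attach() for them once the
 * IOMMU driver has had a chance to add the device to a group.
 */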
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			       u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}
static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (do_iommu_attach(master->dev, master->ops,
				    master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}
static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;
	/*
	 * The device must be attached to a domain before the driver probe
	 * routine gets a chance to start allocating DMA buffers. However,
	 * the IOMMU driver also needs a chance to configure the iommu_group
	 * via its add_device callback first, so we need to make the attach
	 * happen between those two points. Since the IOMMU core uses a bus
	 * notifier with default priority for add_device, do the same but
	 * with a lower priority to ensure the appropriate ordering.
	 */
	nb->notifier_call = __iommu_attach_notifier;
	nb->priority = -100;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}

	return ret;
}
static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);

	/* handle devices queued before this arch_initcall */
	if (!ret)
		__iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);

	return ret;
}
arch_initcall(__iommu_dma_init);
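/*
 * common_iommu_setup_dma_ops() is the arch-facing entry point: it installs
 * the IOMMU-backed DMA ops for a master either immediately, if its group
 * already exists, or later via the bus notifier machinery above.
 *
 * As a rough, hypothetical sketch only (how an architecture wires this up,
 * and the fallback shown, are assumptions rather than anything mandated by
 * this file), a caller could look something like:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		if (!iommu || !common_iommu_setup_dma_ops(dev, dma_base,
 *							  size, iommu))
 *			arch_set_dma_ops(dev, NULL);
 *	}
 */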
bool common_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return false;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}

	return true;
}
EXPORT_SYMBOL_GPL(common_iommu_setup_dma_ops);
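/*
 * Undo common_iommu_setup_dma_ops(): detach the device from its domain, free
 * the domain if it was one of our "fake default" ones, and hand the device
 * back to the platform's default DMA ops.
 */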
void common_iommu_teardown_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (domain) {
		iommu_detach_device(domain, dev);
		if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
			iommu_domain_free(domain);
	}

	arch_set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(common_iommu_teardown_dma_ops);