/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
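
/*
 * Non-coherent devices, and callers asking for DMA_ATTR_WRITE_COMBINE,
 * need a write-combining (Normal NC) mapping; fully coherent devices can
 * keep the cacheable protection they were given.
 */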
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}
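
/*
 * Atomic (non-blocking) allocations cannot remap pages as non-cacheable,
 * so they are served from this pre-mapped gen_pool instead. The pool
 * defaults to 256 KiB and can be resized with the "coherent_pool=" boot
 * parameter.
 */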
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		if (flags & __GFP_ZERO)
			memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
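
/*
 * Back a coherent buffer with memory from CMA when it is available and
 * the caller may sleep, falling back to swiotlb_alloc_coherent()
 * otherwise. GFP_DMA is forced for devices that cannot address more
 * than 32 bits.
 */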
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;
		void *addr;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		if (flags & __GFP_ZERO)
			memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
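
/*
 * Top-level .alloc hook. Non-blocking, non-coherent requests come from
 * the atomic pool; otherwise the buffer is allocated coherently and, for
 * non-coherent devices, the cacheable kernel alias is flushed and the
 * pages are remapped as Normal NC so that CPU and device agree on the
 * memory attributes.
 */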
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);

	size = PAGE_ALIGN(size);

	if (!coherent && !(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
				__get_dma_pgprot(attrs,
					__pgprot(PROT_NORMAL_NC), false),
					NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}
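
/*
 * Top-level .free hook, undoing __dma_alloc(): pool allocations go back
 * to the atomic pool, remapped buffers are vunmap()'d before the
 * underlying pages are released.
 */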
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
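
/*
 * Streaming DMA: wrap the swiotlb ops with the cache maintenance that
 * non-coherent devices require. Maintenance is performed on the kernel
 * alias of the (possibly bounced) device address.
 */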
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
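
/*
 * The sync_* hooks mirror map/unmap: the CPU's view is invalidated
 * before a transfer to the CPU and cleaned after preparing a transfer
 * to the device, with swiotlb handling any bounce-buffer copies in
 * between.
 */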
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
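
/*
 * Set up the atomic pool at boot: grab a physically contiguous chunk
 * (from CMA if configured, otherwise via alloc_pages(GFP_DMA)), flush
 * the kernel alias, remap it as Normal NC and hand it to a gen_pool
 * from which __alloc_from_pool() carves atomic allocations.
 */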
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

static int __init arm64_dma_init(void)
{
	int ret;

	dma_ops = &swiotlb_dma_ops;

	ret = atomic_pool_init();

	return ret;
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);