/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

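/*
 * Coherent allocation: take pages from CMA when CONFIG_DMA_CMA is
 * enabled, otherwise fall back to the swiotlb coherent allocator.
 * Allocations for devices that cannot address more than 32 bits of
 * physical memory are steered into ZONE_DMA.
 */
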
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}

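/*
 * Non-coherent devices cannot snoop the CPU caches, so the buffer is
 * remapped: allocate as above, flush any dirty lines from the cacheable
 * kernel alias, then hand out a second, non-cacheable mapping of the
 * same pages built with vmap().
 */
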
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page, **map;
	void *ptr, *coherent_ptr;
	int order, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;
	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
	if (!map)
		goto no_map;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;
	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			    pgprot_dmacoherent(pgprot_default));
	kfree(map);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = ~0;
	return NULL;
}

static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

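/*
 * Streaming DMA helpers for the non-coherent case: each swiotlb
 * map/unmap/sync operation is paired with explicit cache maintenance.
 * Broadly, __dma_map_area() makes CPU-side data visible before the
 * device accesses the buffer, and __dma_unmap_area() invalidates stale
 * lines before the CPU reads data the device has written.
 */
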
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}

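/*
 * Two ops tables: cache-coherent devices can use the swiotlb entry
 * points directly, while non-coherent devices go through the wrappers
 * above so that cache maintenance happens at the right points.
 */
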
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);

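/*
 * Illustrative usage (a sketch, not part of this file): drivers never
 * call these ops directly; they go through the generic DMA API, which
 * dispatches via the device's dma_map_ops, e.g.:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... program 'handle' into the device, access 'buf' from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, buf, handle);
 *
 * With noncoherent_swiotlb_dma_ops installed, this ends up in
 * __dma_alloc_noncoherent() and __dma_free_noncoherent() above.
 */
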
extern int swiotlb_late_init_with_default_size(size_t default_size);

static int __init swiotlb_late_init(void)
{
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

	/* non-coherent ops are the safe default; coherent platforms can
	   switch dma_ops later */
	dma_ops = &noncoherent_swiotlb_dma_ops;

	return swiotlb_late_init_with_default_size(swiotlb_size);
}
subsys_initcall(swiotlb_late_init);

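/* Preallocate tracking entries for the DMA API debugging facility
   (CONFIG_DMA_API_DEBUG); dma_debug_init() is a no-op stub otherwise. */
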
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);