/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);

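/*
 * Example: booting with "coherentio" on the kernel command line claims
 * hardware-maintained coherency, "nocoherentio" forces software cache
 * maintenance; how the flag is honoured is up to the platform's
 * plat_device_is_coherent() implementation.
 */
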
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000 ||
		current_cpu_type() == CPU_BMIPS5000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

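/*
 * Worked example: on a kernel with both ZONE_DMA and ZONE_DMA32, a
 * device whose coherent_dma_mask is exactly DMA_BIT_MASK(32) fails the
 * first test (it is not *below* the 32-bit mask) but passes the second,
 * so its allocations come from ZONE_DMA32; a full 64-bit mask picks up
 * no zone modifier at all and may be satisfied from any zone.
 */
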
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

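/*
 * Sketch of a hypothetical caller (RING_BYTES and ring are invented
 * names for illustration):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_noncoherent(dev, RING_BYTES, &ring_dma,
 *					   GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_noncoherent(dev, RING_BYTES, ring, ring_dma);
 *
 * The buffer is cached, so the caller must bracket device accesses
 * with dma_cache_sync().
 */
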
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* Undo the uncached remapping done at allocation time. */
	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

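/*
 * The three cases mirror the DMA-API contract: DMA_TO_DEVICE writes
 * dirty cachelines back so the device reads current data,
 * DMA_FROM_DEVICE invalidates so the CPU re-reads what the device
 * wrote, and DMA_BIDIRECTIONAL must do both.
 */
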
/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			/* Clip to the current page for the kmap window. */
			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			/* Lowmem is directly addressable; sync the rest. */
			__dma_sync_virtual(page_address(page) + offset,
					   len, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

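/*
 * These hooks are reached through the generic DMA API; a hypothetical
 * non-coherent driver receiving data from its device would do:
 *
 *	dma_addr_t busaddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					  DMA_FROM_DEVICE);
 *	...let the device fill the buffer...
 *	dma_unmap_page(dev, busaddr, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * The map invalidates the caches up front; on the CPUs listed in
 * cpu_needs_post_dma_flush() the unmap flushes once more.
 */
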
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

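/*
 * Note the asymmetry with mips_dma_sync_single_for_cpu() above: only
 * the speculating CPUs need work when handing a buffer back to the
 * CPU, but every non-coherent system must write dirty cachelines out
 * before the device owns the buffer.
 */
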
static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_needs_post_dma_flush(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

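/*
 * get_dma_ops() in <asm/dma-mapping.h> falls back to this pointer for
 * devices without per-device dma_ops, so platforms with unusual DMA
 * constraints (Octeon, for example) can substitute their own
 * struct dma_map_ops at boot.
 */
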
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);