#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm/cacheflush.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void arch_set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}

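/*
 * Example: hypothetical platform code could install the coherent ops for
 * a device known to bypass the caches (a sketch, not part of this API's
 * contract):
 *
 *	static void my_plat_dma_configure(struct device *dev)
 *	{
 *		arch_set_dma_ops(dev, &arm_coherent_dma_ops);
 *	}
 */
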
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);

/*
 * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
 * implementations, we don't provide a dma_cache_sync function so drivers using
 * this API are highlighted with build warnings.
 */
#include <asm-generic/dma-mapping-common.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

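/*
 * Worked example for the default helpers above (illustrative only, not
 * for driver use): with RAM at physical 0x80000000 appearing to the
 * device at bus address 0, dma_pfn_offset is 0x80000, so assuming 4KiB
 * pages and a flat __pfn_to_bus():
 *
 *	pfn_to_dma(dev, 0x80100) == (0x80100 - 0x80000) << 12 == 0x00100000
 *
 * dma_to_pfn() inverts this by adding the offset back.
 */
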
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

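/*
 * The self-referencing #define above signals the generic header's
 * "#ifndef dma_max_pfn" check that an override exists. As a sketch: with
 * a 32-bit dma_mask, a zero dma_pfn_offset and a flat bus mapping, this
 * evaluates to PHYS_PFN_OFFSET + 0xfffff.
 */
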
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);

/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}

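/*
 * Worked example (illustrative): for a 24-bit mask of 0x00ffffff,
 * limit = (mask + 1) & ~mask = 0x01000000, so transfers larger than
 * 16MiB are rejected outright, and a buffer at addr = 0x00ff0000 with
 * size = 0x20000 also fails because addr + size - 1 = 0x0100ffff has
 * bits set above the mask.
 */
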
static inline void dma_mark_clean(void *addr, size_t size) { }

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @gfp: GFP flags for the allocation
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

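/*
 * Drivers reach this through the generic API rather than calling it
 * directly; a minimal sketch (error handling abbreviated, "pdev" being a
 * hypothetical platform device):
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, SZ_4K, cpu, dma);
 */
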
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @dma_addr: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);

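/*
 * Typically invoked via dma_mmap_coherent() from a driver's mmap handler;
 * a hedged sketch where "my_state" (driver state holding the allocation)
 * is hypothetical:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_state *st = file->private_data;
 *
 *		return dma_mmap_coherent(st->dev, vma, st->cpu_addr,
 *					 st->dma_addr, st->size);
 *	}
 */
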
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

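/*
 * Example: a board file could raise the pool size from its early machine
 * hook ("my_init_early" is hypothetical); the pool itself is allocated
 * at postcore_initcall time, hence the ordering requirement:
 *
 *	static void __init my_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */
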
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));

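/*
 * Example registration (hypothetical bus code; the 64MB cut-off mirrors
 * the IXP425 inbound window described above):
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	if (dmabounce_register_dev(dev, 2048, 4096, my_needs_bounce))
 *		dev_err(dev, "failed to register with dmabounce\n");
 */
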
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
extern void __dma_page_cpu_to_dev(struct page *, unsigned long, size_t,
		enum dma_data_direction);
extern void __dma_page_dev_to_cpu(struct page *, unsigned long, size_t,
		enum dma_data_direction);

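/*
 * Illustrative ownership timeline for a DMA_TO_DEVICE streaming mapping
 * (a sketch of the model described above, not a driver-facing API):
 *
 *	dma_map_single()   -> __dma_page_cpu_to_dev()  (clean caches;
 *							device owns buffer)
 *	... hardware performs the transfer ...
 *	dma_unmap_single() -> __dma_page_dev_to_cpu()  (invalidate as needed;
 *							CPU owns buffer again)
 *
 * The CPU must not touch the buffer while the device owns it.
 */
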
static inline void arch_flush_page(struct device *dev, const void *virt,
				   phys_addr_t phys)
{
	dmac_flush_range(virt, virt + PAGE_SIZE);
	outer_flush_range(phys, phys + PAGE_SIZE);
}

static inline void arch_dma_map_area(phys_addr_t phys, size_t size,
				     enum dma_data_direction dir)
{
	unsigned int offset = phys & ~PAGE_MASK;
	__dma_page_cpu_to_dev(phys_to_page(phys & PAGE_MASK), offset, size, dir);
}

static inline void arch_dma_unmap_area(phys_addr_t phys, size_t size,
				       enum dma_data_direction dir)
{
	unsigned int offset = phys & ~PAGE_MASK;
	__dma_page_dev_to_cpu(phys_to_page(phys & PAGE_MASK), offset, size, dir);
}

static inline pgprot_t arch_get_dma_pgprot(struct dma_attrs *attrs,
					   pgprot_t prot, bool coherent)
{
	if (coherent)
		return prot;

	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

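/*
 * Example effect (a sketch): a non-coherent allocation with
 * DMA_ATTR_WRITE_COMBINE set gets a write-combining mapping, e.g. for a
 * frame buffer:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, &attrs);
 */
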
extern void *arch_alloc_from_atomic_pool(size_t size, struct page **ret_page,
					 gfp_t flags);
extern bool arch_in_atomic_pool(void *start, size_t size);
extern int arch_free_from_atomic_pool(void *start, size_t size);

#endif /* __KERNEL__ */
#endif