/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#ifdef CONFIG_ION_POOL_CACHE_POLICY
#include <asm/cacheflush.h>
#endif

#include "ion.h"

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr:		iommu virtual address
 * @node:		rb node to exist in the buffer's tree of iommu mappings
 * @key:		contains the iommu device info
 * @ref:		for reference counting this mapping
 * @mapped_size:	size of the iova space mapped
 *			(may not be the same as the buffer size)
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range.  There may exist other mappings of this buffer in
 * different domains or address ranges.  All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	uint64_t key;
	struct ion_buffer *buffer;
	struct kref ref;
	int mapped_size;
};

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffers cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
 * @pages:		flat array of pages in the buffer -- used by fault
 *			handler and only valid for buffers that are faulted in
 * @vmas:		list of vma's mapping this buffer
 * @handle_count:	count of handles referencing this buffer
 * @task_comm:		taskcomm of last client to reference this buffer in a
 *			handle, used for debugging
 * @pid:		pid of last client to reference this buffer in a
 *			handle, used for debugging
 */
struct ion_buffer {
	struct kref ref;
	union {
		struct rb_node node;
		struct list_head list;
	};
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	unsigned long private_flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct sg_table *sg_table;
	struct page **pages;
	struct list_head vmas;
	/* used to track orphaned buffers */
	int handle_count;
	char task_comm[TASK_COMM_LEN];
	pid_t pid;
	unsigned int iommu_map_cnt;
	struct rb_root iommu_maps;
};
void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only define on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @shrink:		shrink the heap's caches, if any
 * @map_iommu:		map the buffer into an iommu domain
 * @unmap_iommu:	unmap the buffer from an iommu domain
 *
 * allocate, phys, and map_user return 0 on success, -errno on error.
 * map_dma and map_kernel return a pointer on success, an ERR_PTR on
 * error.  @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker.  In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct sg_table *(*map_dma) (struct ion_heap *heap,
				     struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
	int (*map_iommu)(struct ion_buffer *buffer,
			 struct device *iommu_dev,
			 struct ion_iommu_map *map_data,
			 unsigned long iova_length,
			 unsigned long flags);
	void (*unmap_iommu)(struct device *iommu_dev,
			    struct ion_iommu_map *data);
};
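
/*
 * Example: a minimal heap might wire up its ops as below.  This is an
 * illustrative sketch only -- the "example_heap_*" functions are
 * hypothetical, not part of ion.  The generic ion_heap_map_kernel(),
 * ion_heap_unmap_kernel() and ion_heap_map_user() helpers declared later
 * in this header are suitable defaults for any heap whose buffers carry a
 * valid sg_table:
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate	= example_heap_allocate,
 *		.free		= example_heap_free,
 *		.map_dma	= example_heap_map_dma,
 *		.unmap_dma	= example_heap_unmap_dma,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */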

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE	(1 << 0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function.  Skip any possible
 * heap-specific caching mechanism (e.g. page pools).  Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE	(1 << 0)
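
/*
 * Example: a pooled heap's free op must honor ION_PRIV_FLAG_SHRINKER_FREE.
 * A hypothetical sketch ("example_heap", to_example_heap() and the pool
 * field are illustrative, not part of ion):
 *
 *	static void example_heap_free(struct ion_buffer *buffer)
 *	{
 *		struct example_heap *heap = to_example_heap(buffer->heap);
 *		struct page *page = buffer->priv_virt;
 *
 *		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *			ion_page_pool_free_immediate(heap->pool, page);
 *		else
 *			ion_page_pool_free(heap->pool, page);
 *	}
 */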

/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node to put the heap on the device's list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @debug_show:		called when heap debug file is read to add any
 *			heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct shrinker shrinker;
	struct list_head free_list;
	size_t free_list_size;
	spinlock_t free_lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;

	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};

/**
 * ion_buffer_cached - this ion buffer is cached
 * @buffer:		buffer
 *
 * indicates whether this ion buffer is cached
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:		buffer
 *
 * indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR value on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:		the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:		the device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
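
/*
 * Example: a platform driver might create a device and register its heaps
 * roughly as follows.  An illustrative sketch only -- "my_ioctl",
 * "num_heaps" and "heap_data" are hypothetical, not part of ion:
 *
 *	struct ion_device *idev;
 *	struct ion_heap *heap;
 *	int i;
 *
 *	idev = ion_device_create(my_ioctl);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *
 *	for (i = 0; i < num_heaps; i++) {
 *		heap = ion_heap_create(&heap_data[i]);
 *		if (!IS_ERR(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */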

/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
		      struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

/**
 * ion_heap_init_shrinker - register a shrinker for a heap
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 * this function will be called to set up a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
void ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees.  Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);
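
/*
 * Example: deferred freeing is enabled by setting ION_HEAP_FLAG_DEFER_FREE
 * on the heap before registering it; ion_device_add_heap() then performs
 * roughly the following on the heap's behalf (illustrative sketch of the
 * core's behavior, which may vary between ion versions):
 *
 *	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 *		ion_heap_init_deferred_free(heap);
 *	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
 *		ion_heap_init_shrinker(heap);
 */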

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed.  The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *				list, skipping any heap-specific
 *				pooling or caching mechanisms
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed.  The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 *
 * Unlike @ion_heap_freelist_drain, this function does not put any pages
 * back into page pools or otherwise cache them.  Everything must be
 * genuinely freed back to the system.  If you are freeing from a
 * shrinker you probably want to use this.  Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
				size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);
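
/*
 * Example: inside a shrinker scan callback, a heap typically sizes its
 * pass off the freelist and then drains it with ion_heap_freelist_shrink()
 * so that reclaimed pages are not simply recycled into pools.  An
 * illustrative sketch only -- "to_free" is a local, hypothetical variable:
 *
 *	size_t to_free = min_t(size_t, ion_heap_freelist_size(heap),
 *			       nr_to_scan * PAGE_SIZE);
 *	size_t freed = ion_heap_freelist_shrink(heap, to_free);
 */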

/**
 * functions for creating and destroying the built in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);

struct ion_heap *ion_drm_heap_create(struct ion_platform_heap *);
void ion_drm_heap_destroy(struct ion_heap *);

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);
/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that allocation failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL	-1
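
/*
 * Example: because 0 is a valid result, callers must compare against
 * ION_CARVEOUT_ALLOCATE_FAIL rather than testing for zero.  An
 * illustrative sketch only:
 *
 *	ion_phys_addr_t paddr;
 *
 *	paddr = ion_carveout_allocate(heap, buffer->size, PAGE_SIZE);
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 *	buffer->priv_phys = paddr;
 */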

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, i.e. any cached mapping has been
 * invalidated from the cache, provides a significant performance benefit on
 * many systems
 */
/**
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct and especially the count
 *			item list
 * @gfp_mask:		gfp_mask to use when allocating
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mapping has
 * been invalidated from the cache, provides a significant performance benefit
 * on many systems.
 */
struct ion_page_pool {
	int high_count;
	int low_count;
	struct list_head high_items;
	struct list_head low_items;
	struct mutex mutex;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
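
/*
 * Example: a heap typically creates one pool per page order it allocates
 * from and pulls pages through the pool on the hot path; allocation falls
 * back to fresh pages when the pool is empty, and freed pages are parked
 * in the pool for reuse.  An illustrative sketch only -- the single-pool,
 * order-0 setup here is hypothetical:
 *
 *	struct ion_page_pool *pool;
 *	struct page *page;
 *
 *	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	page = ion_page_pool_alloc(pool);
 *	...
 *	ion_page_pool_free(pool, page);
 */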

/*
 * When CONFIG_ION_POOL_CACHE_POLICY is set, pages allocated for the pool
 * are remapped write-combined, and are restored to write-back before
 * being released to the system, so pooled memory stays safe for dma
 * without per-buffer cache maintenance.
 */
#ifdef CONFIG_ION_POOL_CACHE_POLICY
static inline void ion_page_pool_alloc_set_cache_policy
				(struct ion_page_pool *pool,
				 struct page *page)
{
	void *va = page_address(page);

	if (va)
		set_memory_wc((unsigned long)va, 1 << pool->order);
}

static inline void ion_page_pool_free_set_cache_policy
				(struct ion_page_pool *pool,
				 struct page *page)
{
	void *va = page_address(page);

	if (va)
		set_memory_wb((unsigned long)va, 1 << pool->order);
}
#else
static inline void ion_page_pool_alloc_set_cache_policy
				(struct ion_page_pool *pool,
				 struct page *page) { }

static inline void ion_page_pool_free_set_cache_policy
				(struct ion_page_pool *pool,
				 struct page *page) { }
#endif

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			 int nr_to_scan);
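
/*
 * Example: a heap's shrink op usually walks its pools and asks each to
 * give pages back until the request is satisfied.  An illustrative sketch
 * only -- "example_heap", to_example_heap(), "pools" and "npools" are
 * hypothetical, not part of ion:
 *
 *	static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *				       int nr_to_scan)
 *	{
 *		struct example_heap *eheap = to_example_heap(heap);
 *		int i, freed = 0;
 *
 *		for (i = 0; i < eheap->npools && freed < nr_to_scan; i++)
 *			freed += ion_page_pool_shrink(eheap->pools[i],
 *						      gfp_mask,
 *						      nr_to_scan - freed);
 *		return freed;
 *	}
 */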

/**
 * ion_pages_sync_for_device - cache flush pages for use with the specified
 *			       device
 * @dev:		the device the pages will be used with
 * @page:		the first page to be flushed
 * @size:		size in bytes of region to be flushed
 * @dir:		direction of dma transfer
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir);
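
/*
 * Example: heaps call this after allocating an uncached buffer so that no
 * dirty cache lines alias the memory once a device starts doing dma to it.
 * An illustrative sketch only (passing a NULL dev follows the convention
 * used by ion's own heaps; "sg" is assumed to be a scatterlist entry of
 * the freshly allocated buffer):
 *
 *	if (!ion_buffer_cached(buffer))
 *		ion_pages_sync_for_device(NULL, sg_page(sg),
 *					  sg->length, DMA_BIDIRECTIONAL);
 */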

#endif /* _ION_PRIV_H */