/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rockchip-iovmm.h>
#include "ion.h"
#include "ion_priv.h"

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
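
/*
 * One page pool per entry in orders[]: allocations try order 8 first
 * (1 MiB with 4 KiB pages), then order 4 (64 KiB), then single pages.
 */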
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};
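
/*
 * Uncached buffers are served from the per-order page pools; cached
 * buffers get freshly allocated pages plus a cache sync, so no stale
 * CPU cache lines alias the new device mapping.
 */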
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	}

	return page;
}
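
/*
 * Pages from uncached buffers are recycled into their order's pool
 * unless the shrinker initiated the free, in which case they go
 * straight back to the buddy allocator.
 */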
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);

	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else {
		__free_pages(page, order);
	}
}
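
/*
 * Pick the biggest chunk that still fits: walk orders[] from high to
 * low and return the first order that fits within both the remaining
 * size and the caller's max_order cap.
 */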
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
	if (!info)
		return NULL;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info->page = page;
		info->order = orders[i];
		INIT_LIST_HEAD(&info->list);
		return info;
	}
	kfree(info);

	return NULL;
}
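
/*
 * Build the buffer as a list of largest-fitting chunks. max_order only
 * ever decreases: once an order has failed, later chunks never retry
 * anything bigger, so a fragmented system falls back toward order-0
 * pages instead of hammering the allocator.
 */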
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}
	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
		sg = sg_next(sg);
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;

err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/* Uncached pages come from the page pools, so zero them before
	   returning for security purposes (other allocations are zeroed
	   at alloc time). */
	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
	sg_free_table(table);
	kfree(table);
}

static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}
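
/*
 * Shrinker hook: under memory pressure the ION core asks the heap to
 * give pages back, which here means draining each order's pool.
 */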
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
	}

	return nr_total;
}
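
/*
 * Rockchip-specific extension: map the buffer's scatterlist through
 * the SoC IOMMU so physically discontiguous system pages appear
 * contiguous in the device's address space.
 */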
#ifdef CONFIG_ROCKCHIP_IOMMU
/* Get the device-visible (IOVA) address for this buffer. */
static int ion_system_map_iommu(struct ion_buffer *buffer,
				struct device *iommu_dev,
				struct ion_iommu_map *data,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct sg_table *table = (struct sg_table *)buffer->priv_virt;

	data->iova_addr = rockchip_iovmm_map(iommu_dev, table->sgl, 0,
					     iova_length);
	pr_debug("%s: map %x -> %lx\n", __func__, table->sgl->dma_address,
		 data->iova_addr);
	if (!data->iova_addr || IS_ERR_VALUE(data->iova_addr)) {
		pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__,
		       data->iova_addr);
		ret = data->iova_addr;
		goto out;
	}

	data->mapped_size = iova_length;

out:
	return ret;
}

void ion_system_unmap_iommu(struct device *iommu_dev,
			    struct ion_iommu_map *data)
{
	pr_debug("%s: unmap %x@%lx\n", __func__, data->mapped_size,
		 data->iova_addr);
	rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
}
#endif

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
#ifdef CONFIG_ROCKCHIP_IOMMU
	.map_iommu = ion_system_map_iommu,
	.unmap_iommu = ion_system_unmap_iommu,
#endif
};

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
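
/*
 * The contiguous heap rounds the request up to a power-of-two order,
 * split_page()s the allocation into independent order-0 pages, and
 * immediately frees the tail pages beyond the page-aligned length.
 */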
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_set_page(table->sgl, page, len, 0);

	buffer->priv_virt = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

out:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);
	kfree(table);
	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);

	*addr = page_to_phys(page);
	*len = buffer->size;
	return 0;
}

static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}