/*
 * drivers/gpu/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
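
/*
 * Map a buffer into the kernel's address space: the pages backing its
 * sg_table are gathered into a flat array and handed to vmap(). Cached
 * buffers keep the default kernel protection; uncached buffers are
 * mapped write-combined.
 */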
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}
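
/*
 * Map a buffer into a userspace vma, one scatterlist entry at a time.
 * vma->vm_pgoff is honoured as a byte offset into the buffer, and the
 * walk stops as soon as the vma has been filled.
 */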
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
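
/*
 * Zeroing helpers: pages are mapped in batches of up to 32 through
 * vm_map_ram(), memset to zero, then unmapped again, so buffers without
 * a permanent kernel mapping can still be cleared before reuse.
 */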
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}
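
/*
 * Deferred-free list: heaps with ION_HEAP_FLAG_DEFER_FREE queue freed
 * buffers here instead of destroying them inline. A low-priority
 * kthread drains the list in the background, and the shrinker can
 * reclaim it directly under memory pressure.
 */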
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}
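
/*
 * Core drain loop shared by ion_heap_freelist_drain() and
 * ion_heap_freelist_shrink(). A size of 0 means "drain everything";
 * skip_pools marks each buffer with ION_PRIV_FLAG_SHRINKER_FREE so the
 * heap's free path bypasses any page pools and returns memory to the
 * system immediately.
 */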
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		/* drop the lock while destroying the buffer */
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}
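
/*
 * Body of the deferred-free kthread: sleep (freezably) until the
 * freelist is non-empty, then destroy buffers one at a time.
 */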
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}
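
/*
 * Set up the freelist state and spawn the deferred-free thread. The
 * thread runs at SCHED_IDLE priority so background frees never compete
 * with real work.
 */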
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	spin_lock_init(&heap->free_lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}
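
/*
 * Legacy combined shrinker callback: when sc->nr_to_scan is 0 this is
 * only a query, and the return value reports how many pages are still
 * reclaimable. Otherwise the freelist is shrunk first, then the heap's
 * own shrink op (if any) is asked to scan whatever remains of the
 * request.
 */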
static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		goto out;

	/*
	 * shrink the free list first, no point in zeroing the memory if we're
	 * just going to reclaim it. Also, skip any possible page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan < 0)
		to_scan = 0;

out:
	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
	return total;
}
void ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.shrink = ion_heap_shrink;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;
	register_shrinker(&heap->shrinker);
}
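
/*
 * ion_heap_create() dispatches on heap_data->type to the matching
 * heap-specific constructor. A minimal sketch of a caller, assuming a
 * hypothetical board file (the "system_heap" name and the id below are
 * made up for illustration; ion_device_add_heap() lives in ion.c):
 *
 *	static struct ion_platform_heap system_heap = {
 *		.type = ION_HEAP_TYPE_SYSTEM,
 *		.id   = 0,
 *		.name = "system",
 *	};
 *
 *	struct ion_heap *heap = ion_heap_create(&system_heap);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 */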
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}
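
/* Tear-down counterpart to ion_heap_create(); safe to call with NULL. */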
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}