Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
[firefly-linux-kernel-4.4.55.git] drivers/staging/android/ion/ion_heap.c
/*
 * drivers/gpu/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

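/*
 * Map a buffer into the kernel's address space.  Builds a flat array of
 * the pages backing the buffer's sg_table and vmap()s them, using a
 * write-combined mapping unless the buffer was allocated cached.
 */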
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (vaddr == NULL)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

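/*
 * Map a buffer into a userspace vma.  Walks the sg_table, skipping the
 * pages covered by vma->vm_pgoff, and remaps each contiguous chunk with
 * remap_pfn_range() until the vma is fully covered.
 */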
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

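/*
 * Zero @num pages by temporarily mapping them with vm_map_ram() and
 * memset()ing the mapping.  Called in batches by ion_heap_sglist_zero().
 */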
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

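/*
 * Zero every page backing @buffer, picking a cached or write-combined
 * kernel mapping to match the buffer's ION_FLAG_CACHED flag.
 */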
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

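/*
 * Deferred-free support: freed buffers are queued on heap->free_list and
 * destroyed later by the heap's kthread.  Adding a buffer updates the
 * accounted free-list size and wakes that thread.
 */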
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

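/*
 * Remove up to @size bytes of buffers from the free list and destroy
 * them.  A @size of 0 drains the whole list.  free_lock is dropped
 * around each ion_buffer_destroy() call; with @skip_pools set, buffers
 * are tagged ION_PRIV_FLAG_SHRINKER_FREE so heaps can skip their page
 * pools when freeing them.
 */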
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

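/*
 * Per-heap kthread: sleeps until the free list is non-empty, then pops
 * and destroys buffers one at a time, dropping free_lock across each
 * ion_buffer_destroy() call.
 */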
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

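/*
 * Set up the free list, its lock and waitqueue, and start the deferred
 * free thread at SCHED_IDLE priority.
 */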
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
        spin_lock_init(&heap->free_lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_RET(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

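/*
 * Shrinker callback.  With nr_to_scan == 0 this only reports how many
 * pages are reclaimable; otherwise the deferred free list is drained
 * first and any remaining scan count is passed to the heap's own
 * ->shrink() op.  Returns the number of pages still reclaimable.
 */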
static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                goto out;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan < 0)
                to_scan = 0;

out:
        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return total;
}

void ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.shrink = ion_heap_shrink;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;
        register_shrinker(&heap->shrinker);
}

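/*
 * Instantiate a heap of the type requested in @heap_data by dispatching
 * to the matching type-specific constructor, then fill in the generic
 * name and id fields.
 */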
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}

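/*
 * Tear down a heap via its type-specific destructor; a NULL heap is a
 * no-op.
 */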
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}