drivers/staging/android/ion/ion_heap.c
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

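/*
 * Map a buffer's backing pages into the kernel address space.  The
 * scatterlist is flattened into a temporary page array and handed to
 * vmap(); cached buffers keep the normal kernel protection, uncached
 * buffers are mapped write-combined.
 */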
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (vaddr == NULL)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

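/*
 * Map a buffer into a userspace VMA with remap_pfn_range().  vm_pgoff
 * is interpreted as a page offset into the buffer: leading scatterlist
 * entries are skipped until that offset is reached, and each remaining
 * entry is clipped to the end of the VMA.
 */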
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

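/*
 * Buffer-zeroing helpers: pages are temporarily mapped in batches of up
 * to 32 with vm_map_ram(), cleared with memset(), then unmapped again.
 * Uncached buffers are cleared through a write-combined mapping.
 */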
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

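/*
 * Deferred-free freelist.  Heaps using deferred free (see
 * ION_HEAP_FLAG_DEFER_FREE below) queue buffers here under free_lock
 * and wake the per-heap kthread, which destroys them asynchronously.
 */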
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

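/*
 * Drain up to @size bytes from the freelist (0 means drain everything).
 * When @skip_pools is set, ION_PRIV_FLAG_SHRINKER_FREE is set on each
 * buffer so the heap frees the pages outright instead of returning them
 * to a page pool.  free_lock is dropped around ion_buffer_destroy(),
 * which may sleep.
 */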
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

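/*
 * Per-heap kthread: sleeps (freezably) until the freelist is non-empty,
 * then pops and destroys buffers one at a time.
 */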
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
        spin_lock_init(&heap->free_lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_RET(heap->task);
        }
        /* only touch the task once kthread_run() is known to have succeeded */
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

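/*
 * Shrinker callback (older single-callback shrinker interface): returns
 * the number of reclaimable pages; when sc->nr_to_scan is non-zero it
 * first drains the deferred freelist (skipping page pools) and then
 * passes the remaining scan count to the heap's own ->shrink() op.
 */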
static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                goto out;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan < 0)
                to_scan = 0;

out:
        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return total;
}

void ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.shrink = ion_heap_shrink;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;
        register_shrinker(&heap->shrinker);
}

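/*
 * Heap factory: dispatch on ion_platform_heap->type to the matching
 * constructor and fill in the generic name/id fields.  Unknown types
 * and constructor failures are reported and turned into -EINVAL.
 */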
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DRM:
                heap = ion_drm_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}

void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DRM:
                ion_drm_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}