/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

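/*
 * Allocation policy: try progressively smaller page orders (8, 4, then 0).
 * The highest order uses gfp flags that forbid direct reclaim and retries,
 * so a failed large allocation falls through to a smaller order instead of
 * stalling.  Uncached allocations come from per-order page pools; cached
 * allocations go straight to the buddy allocator.  All pages are zeroed.
 */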
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
                                     __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static inline unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool *pools[0];
};

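/*
 * Allocate a single page or compound page of the given order.  Uncached
 * buffers are served from the matching page pool; cached buffers bypass the
 * pools and are synced for device access so no stale CPU cache lines remain.
 */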
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];
        struct page *page;

        if (!cached) {
                page = ion_page_pool_alloc(pool);
        } else {
                gfp_t gfp_flags = low_order_gfp_flags;

                if (order > 4)
                        gfp_flags = high_order_gfp_flags;
                page = alloc_pages(gfp_flags | __GFP_COMP, order);
                if (!page)
                        return NULL;
                ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
                                          DMA_BIDIRECTIONAL);
        }

        return page;
}

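/*
 * Return a page to its pool (uncached buffers) or to the buddy allocator
 * (cached buffers).  Pages freed on behalf of the shrinker skip the pool so
 * that memory pressure actually releases memory back to the system.
 */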
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page)
{
        unsigned int order = compound_order(page);
        bool cached = ion_buffer_cached(buffer);

        if (!cached) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];

                if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
                        ion_page_pool_free_immediate(pool, page);
                else
                        ion_page_pool_free(pool, page);
        } else {
                __free_pages(page, order);
        }
}

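/*
 * Allocate the largest chunk (highest order) that still fits in the remaining
 * size and does not exceed max_order.  The caller lowers max_order to the
 * order just allocated, so chunk sizes are non-increasing.
 */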
static struct page *alloc_largest_available(struct ion_system_heap *heap,
                                            struct ion_buffer *buffer,
                                            unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < num_orders; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                return page;
        }

        return NULL;
}

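/*
 * Build the buffer from a list of variable-order chunks, then pack them into
 * a scatter-gather table.  Allocations larger than half of system RAM are
 * rejected up front, and only page alignment is supported since the buffer
 * is not physically contiguous.
 */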
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i = 0;
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];

        if (align > PAGE_SIZE)
                return -EINVAL;

        if (size / PAGE_SIZE > totalram_pages / 2)
                return -ENOMEM;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                page = alloc_largest_available(sys_heap, buffer, size_remaining,
                                               max_order);
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
                size_remaining -= PAGE_SIZE << compound_order(page);
                max_order = compound_order(page);
                i++;
        }
        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto free_pages;

        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_table;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        buffer->priv_virt = table;
        return 0;

free_table:
        kfree(table);
free_pages:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                free_buffer_page(sys_heap, buffer, page);
        return -ENOMEM;
}

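/*
 * Release every chunk referenced by the buffer.  Uncached pages that will be
 * returned to the page pools are zeroed first so stale data cannot leak into
 * a later allocation; pages freed on behalf of the shrinker skip the zeroing
 * because they go back to the buddy allocator instead of a pool.
 */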
static void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_system_heap *sys_heap = container_of(buffer->heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        bool cached = ion_buffer_cached(buffer);
        struct scatterlist *sg;
        int i;

        /*
         * Uncached pages come from the page pools; zero them before returning
         * them to the pools for security purposes (other allocations are
         * zeroed at alloc time).
         */
        if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
                ion_heap_buffer_zero(buffer);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg));
        sg_free_table(table);
        kfree(table);
}

static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
}

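/*
 * Drain the page pools under memory pressure.  A zero nr_to_scan is a query:
 * each pool only reports how many pages it could free.  Otherwise the pools
 * are shrunk in order until the request is satisfied.
 */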
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                                  int nr_to_scan)
{
        struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i, nr_freed;
        int only_scan = 0;

        sys_heap = container_of(heap, struct ion_system_heap, heap);

        if (!nr_to_scan)
                only_scan = 1;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
                nr_total += nr_freed;

                if (!only_scan) {
                        nr_to_scan -= nr_freed;
                        /* shrink completed */
                        if (nr_to_scan <= 0)
                                break;
                }
        }

        return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .shrink = ion_system_heap_shrink,
};

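/*
 * debugfs hook: report how many pages, split by highmem/lowmem, each pool is
 * currently holding.
 */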
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
                           pool->high_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
                           pool->low_count, pool->order,
                           (PAGE_SIZE << pool->order) * pool->low_count);
        }
        return 0;
}

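/*
 * Create the system heap and one page pool per supported order.  The heap
 * sets ION_HEAP_FLAG_DEFER_FREE, so buffer frees are handled by the ION
 * deferred-free worker rather than in the caller's context.
 */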
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_system_heap *heap;
        int i;

        heap = kzalloc(sizeof(struct ion_system_heap) +
                        sizeof(struct ion_page_pool *) * num_orders,
                        GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto destroy_pools;
                heap->pools[i] = pool;
        }

        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;

destroy_pools:
        while (i--)
                ion_page_pool_destroy(heap->pools[i]);
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++)
                ion_page_pool_destroy(sys_heap->pools[i]);
        kfree(sys_heap);
}

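/*
 * The "system contig" heap hands out physically contiguous memory from the
 * buddy allocator.  The allocation is rounded up to a power-of-two order,
 * split into individual pages, and the pages beyond the requested length are
 * freed immediately so only PAGE_ALIGN(len) bytes stay pinned.
 */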
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        int order = get_order(len);
        struct page *page;
        struct sg_table *table;
        unsigned long i;
        int ret;

        if (align > (PAGE_SIZE << order))
                return -EINVAL;

        page = alloc_pages(low_order_gfp_flags, order);
        if (!page)
                return -ENOMEM;

        split_page(page, order);

        len = PAGE_ALIGN(len);
        for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
                __free_page(page + i);

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_table;

        sg_set_page(table->sgl, page, len, 0);

        buffer->priv_virt = table;

        ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

        return 0;

free_table:
        kfree(table);
free_pages:
        for (i = 0; i < len >> PAGE_SHIFT; i++)
                __free_page(page + i);

        return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < pages; i++)
                __free_page(page + i);
        sg_free_table(table);
        kfree(table);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);

        *addr = page_to_phys(page);
        *len = buffer->size;
        return 0;
}

static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                             struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}