/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rockchip-iovmm.h>
#include "ion.h"
#include "ion_priv.h"

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
                                     __GFP_NORETRY) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
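
/*
 * Allocations are attempted at these orders, largest first.  With 4 KiB
 * pages that means 1 MiB, 64 KiB and 4 KiB chunks; one page pool is kept
 * per order so freed uncached pages can be recycled cheaply.
 */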
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool **pools;
};

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

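/*
 * Uncached pages are recycled through the per-order page pool; cached
 * pages bypass the pools and come straight from the buddy allocator,
 * followed by a cache sync so the CPU's zeroing is visible to the device.
 */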
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];
        struct page *page;

        if (!cached) {
                page = ion_page_pool_alloc(pool);
        } else {
                gfp_t gfp_flags = low_order_gfp_flags;

                if (order > 4)
                        gfp_flags = high_order_gfp_flags;
                page = alloc_pages(gfp_flags, order);
                if (!page)
                        return NULL;
                ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
                                          DMA_BIDIRECTIONAL);
        }

        return page;
}

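/*
 * Counterpart to alloc_buffer_page(): uncached pages go back into the
 * matching pool for reuse unless the free came from the shrinker
 * (ION_PRIV_FLAG_SHRINKER_FREE), in which case they are handed straight
 * back to the buddy allocator, as cached pages always are.
 */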
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page,
                             unsigned int order)
{
        bool cached = ion_buffer_cached(buffer);

        if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];

                ion_page_pool_free(pool, page);
        } else {
                __free_pages(page, order);
        }
}

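/*
 * Allocate the largest chunk that still fits in @size without exceeding
 * @max_order.  Returns a page_info describing the chunk, or NULL if every
 * eligible order failed.
 */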
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
                                                 struct ion_buffer *buffer,
                                                 unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
        if (!info)
                return NULL;

        for (i = 0; i < num_orders; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                info->page = page;
                info->order = orders[i];
                INIT_LIST_HEAD(&info->list);
                return info;
        }
        kfree(info);

        return NULL;
}

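/*
 * Build the buffer from the largest chunks available.  A sketch of the
 * arithmetic, assuming 4 KiB pages: a 1.25 MiB request (320 pages) is
 * satisfied by one order-8 chunk (256 pages) followed by four order-4
 * chunks (16 pages each).  max_order only ever decreases, so once a high
 * order has failed the loop never retries it.
 */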
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];

        if (align > PAGE_SIZE)
                return -EINVAL;

        if (size / PAGE_SIZE > totalram_pages / 2)
                return -ENOMEM;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(sys_heap, buffer, size_remaining,
                                               max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }
        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        ret = sg_alloc_table(table, i, GFP_KERNEL);
        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;

                sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
                sg = sg_next(sg);
                list_del(&info->list);
                kfree(info);
        }

        buffer->priv_virt = table;
        return 0;

err1:
        kfree(table);
err:
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                free_buffer_page(sys_heap, buffer, info->page, info->order);
                kfree(info);
        }
        return -ENOMEM;
}

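/*
 * Pages flow back through free_buffer_page() one scatterlist entry at a
 * time; the original chunk order is recovered from each entry's length.
 */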
static void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        bool cached = ion_buffer_cached(buffer);
        struct scatterlist *sg;
        int i;

        /*
         * Uncached pages come from the page pools; zero them before
         * returning them for security purposes (other allocations are
         * zeroed at allocation time).
         */
        if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
                ion_heap_buffer_zero(buffer);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg),
                                 get_order(sg->length));
        sg_free_table(table);
        kfree(table);
}

static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
}

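/*
 * Heap shrinker hook: asks each per-order pool to drop up to @nr_to_scan
 * items and returns the total count reported back by the pools.
 */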
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                                  int nr_to_scan)
{
        struct ion_system_heap *sys_heap;
        int nr_total = 0;
        int i;

        sys_heap = container_of(heap, struct ion_system_heap, heap);

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
        }

        return nr_total;
}

#ifdef CONFIG_ROCKCHIP_IOMMU
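/*
 * Map the buffer's scatterlist into the device's IOVA space through the
 * Rockchip IOVMM layer and record the resulting device virtual address
 * ("device vaddr") in @data.
 */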
static int ion_system_map_iommu(struct ion_buffer *buffer,
                                struct device *iommu_dev,
                                struct ion_iommu_map *data,
                                unsigned long iova_length,
                                unsigned long flags)
{
        int ret = 0;
        struct sg_table *table = (struct sg_table *)buffer->priv_virt;

        data->iova_addr = rockchip_iovmm_map(iommu_dev, table->sgl, 0,
                                             iova_length);
        pr_debug("%s: map %pad -> %lx\n", __func__,
                 &table->sgl->dma_address, data->iova_addr);
        if (!data->iova_addr || IS_ERR_VALUE(data->iova_addr)) {
                pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__,
                       data->iova_addr);
                ret = -EINVAL;
                goto out;
        }

        data->mapped_size = iova_length;

out:
        return ret;
}

void ion_system_unmap_iommu(struct device *iommu_dev, struct ion_iommu_map *data)
{
        pr_debug("%s: unmap %lx@%lx\n", __func__, data->mapped_size,
                 data->iova_addr);
        rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
}
#endif

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .shrink = ion_system_heap_shrink,
#ifdef CONFIG_ROCKCHIP_IOMMU
        .map_iommu = ion_system_map_iommu,
        .unmap_iommu = ion_system_unmap_iommu,
#endif
};

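/*
 * debugfs helper: dumps, per order, how many pooled pages sit on the
 * highmem and lowmem free lists and how many bytes that represents.
 */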
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
                           pool->low_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->low_count);
        }
        return 0;
}

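/*
 * One pool is created per entry in orders[]; orders above 4 use gfp flags
 * with __GFP_NORETRY and without __GFP_WAIT, so a failed high-order
 * allocation cannot enter reclaim and the heap falls back quickly to the
 * next smaller order.
 */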
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_system_heap *heap;
        int i;

        heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
        heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
                              GFP_KERNEL);
        if (!heap->pools)
                goto err_alloc_pools;
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                heap->pools[i] = pool;
        }

        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;

err_create_pool:
        for (i = 0; i < num_orders; i++)
                if (heap->pools[i])
                        ion_page_pool_destroy(heap->pools[i]);
        kfree(heap->pools);
err_alloc_pools:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++)
                ion_page_pool_destroy(sys_heap->pools[i]);
        kfree(sys_heap->pools);
        kfree(sys_heap);
}

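/*
 * The contig heap backs each buffer with one physically contiguous run.
 * The buddy allocator only hands out power-of-two blocks, so the code
 * below over-allocates to the next order, calls split_page() so each
 * page can be freed individually, and immediately returns the unused
 * tail.  For example (assuming 4 KiB pages), a 3-page request takes an
 * order-2 block (4 pages) and the 4th page is freed straight back.
 */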
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        int order = get_order(len);
        struct page *page;
        struct sg_table *table;
        unsigned long i;
        int ret;

        if (align > (PAGE_SIZE << order))
                return -EINVAL;

        page = alloc_pages(low_order_gfp_flags, order);
        if (!page)
                return -ENOMEM;

        split_page(page, order);

        len = PAGE_ALIGN(len);
        for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
                __free_page(page + i);

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto out;

        sg_set_page(table->sgl, page, len, 0);

        buffer->priv_virt = table;

        ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

        return 0;

out:
        for (i = 0; i < len >> PAGE_SHIFT; i++)
                __free_page(page + i);
        kfree(table);
        return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < pages; i++)
                __free_page(page + i);
        sg_free_table(table);
        kfree(table);
}

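/*
 * Every contig buffer is a single physically contiguous run, so its
 * physical address is simply that of the first page.
 */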
static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);

        *addr = page_to_phys(page);
        *len = buffer->size;
        return 0;
}

static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                       struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                             struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}