/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#include <asm/mach/map.h>

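/*
 * A chunk heap carves a physically contiguous, statically reserved memory
 * region into fixed-size chunks and hands them out through a genalloc pool.
 * A buffer is built as a scatterlist of chunks, so it does not need to be
 * contiguous as a whole, only chunk by chunk.
 */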
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;		/* allocator over the reserved region */
	ion_phys_addr_t base;		/* physical base of the region */
	unsigned long chunk_size;	/* allocation granularity in bytes */
	unsigned long size;		/* total size of the region in bytes */
	unsigned long allocated;	/* bytes currently handed out */
};

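/*
 * Allocate a buffer by pulling chunk_size pieces out of the pool and
 * recording each one as a scatterlist entry.  Fails up front if the heap
 * does not have enough space left, or if the buffer wants faulted user
 * mappings, which this heap does not support.
 */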
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	/*
	 * Return the chunks allocated so far; the buffer has not been DMA
	 * mapped yet, so use sg->length rather than sg_dma_len().
	 */
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

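/*
 * Free a buffer: zero it, write the zeroes back past the CPU caches for
 * cached buffers, and return every chunk to the pool.
 */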
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i) {
		/*
		 * Flush the zeroed data out of the CPU caches before the
		 * chunk is reused.  sg->length is used because the buffer
		 * is no longer DMA mapped at this point.
		 */
		if (ion_buffer_cached(buffer))
			arm_dma_ops.sync_single_for_device(NULL,
				pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
				sg->length, DMA_BIDIRECTIONAL);
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

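/*
 * The scatterlist built at allocation time already describes the buffer,
 * so mapping for DMA just hands it back and unmapping is a no-op.
 */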
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

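/*
 * Kernel and user mappings reuse the generic ion_heap helpers; only
 * allocation, freeing and DMA mapping are heap specific.
 */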
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

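/*
 * Create a chunk heap over the carveout described by heap_data.  The region
 * is zeroed page by page through a temporary kernel mapping, flushed out of
 * the CPU caches, and then added to the genalloc pool with the chunk size
 * as the allocation granularity.
 */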
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct vm_struct *vm_struct;
	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
	int i, ret;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	/*
	 * Zero the carveout one page at a time through a temporary
	 * write-combined kernel mapping; the region may not be covered by
	 * the normal kernel linear mapping.
	 */
	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct) {
		ret = -ENOMEM;
		goto error;
	}
	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
		struct page *page = phys_to_page(chunk_heap->base + i);
		struct page **pages = &page;

		ret = map_vm_area(vm_struct, pgprot, &pages);
		if (ret)
			goto error_map_vm_area;
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}
	free_vm_area(vm_struct);

	/* Write the zeroes back to memory before the region is handed out. */
	arm_dma_ops.sync_single_for_device(NULL,
		pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
		heap_data->size, DMA_BIDIRECTIONAL);
	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %lu\n", __func__,
		chunk_heap->base, heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_map_vm_area:
	free_vm_area(vm_struct);
error:
	gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

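/*
 * Tear down a chunk heap created by ion_chunk_heap_create().  All buffers
 * are expected to have been freed before this is called.
 */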
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
	     container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}