/*
 * drivers/staging/android/ion/ion_drm_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rockchip-iovmm.h>
#include "ion.h"
#include "ion_priv.h"

#define ION_DRM_ALLOCATE_FAILED (-1)

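/*
 * struct ion_drm_heap - carveout-style heap backing secure (DRM) buffers
 * @heap:	the generic ion_heap this heap embeds
 * @pool:	gen_pool handing out ranges of the reserved region
 * @base:	physical base address of the reserved region
 */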
struct ion_drm_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
};

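/*
 * Carve @size bytes out of the heap's pool. Returns the physical address
 * of the allocation, or ION_DRM_ALLOCATE_FAILED when the pool is
 * exhausted. @align is unused here: granularity comes from the pool's
 * minimum allocation order set up in ion_drm_heap_create().
 */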
ion_phys_addr_t ion_drm_allocate(struct ion_heap *heap,
                                      unsigned long size,
                                      unsigned long align)
{
        struct ion_drm_heap *drm_heap =
                container_of(heap, struct ion_drm_heap, heap);
        unsigned long offset = gen_pool_alloc(drm_heap->pool, size);

        if (!offset)
                return ION_DRM_ALLOCATE_FAILED;

        return offset;
}

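/*
 * Return a range previously handed out by ion_drm_allocate() to the
 * pool. Failed allocations (ION_DRM_ALLOCATE_FAILED) are ignored.
 */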
void ion_drm_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size)
{
        struct ion_drm_heap *drm_heap =
                container_of(heap, struct ion_drm_heap, heap);

        if (addr == ION_DRM_ALLOCATE_FAILED)
                return;
        gen_pool_free(drm_heap->pool, addr, size);
}

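/*
 * Report the physical address and length of a buffer. Buffers from this
 * heap are physically contiguous (a single scatterlist entry), so the
 * first page describes the whole allocation.
 */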
static int ion_drm_heap_phys(struct ion_heap *heap,
                                  struct ion_buffer *buffer,
                                  ion_phys_addr_t *addr, size_t *len)
{
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

        *addr = paddr;
        *len = buffer->size;
        return 0;
}

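/*
 * Allocate a physically contiguous buffer from the carveout and describe
 * it with a single-entry sg_table stashed in buffer->priv_virt. Cached
 * allocations are refused because the heap backs secure memory.
 */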
static int ion_drm_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        struct sg_table *table;
        ion_phys_addr_t paddr;
        int ret;

        if (align > PAGE_SIZE)
                return -EINVAL;

        if (ion_buffer_cached(buffer)) {
                pr_err("%s: cannot allocate cached memory from secure heap %s\n",
                        __func__, heap->name);
                return -ENOMEM;
        }

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto err_free;

        paddr = ion_drm_allocate(heap, size, align);
        if (paddr == ION_DRM_ALLOCATE_FAILED) {
                ret = -ENOMEM;
                goto err_free_table;
        }

        sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
        buffer->priv_virt = table;

        return 0;

err_free_table:
        sg_free_table(table);
err_free:
        kfree(table);
        return ret;
}

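/*
 * Zero the buffer before handing its range back to the pool so stale
 * contents never leak to the next client.
 */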
static void ion_drm_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct sg_table *table = buffer->priv_virt;
        struct page *page = sg_page(table->sgl);
        ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

        ion_heap_buffer_zero(buffer);
        ion_drm_free(heap, paddr, buffer->size);
        sg_free_table(table);
        kfree(table);
}

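/*
 * The sg_table built at allocation time already describes the buffer,
 * so DMA map simply hands it out and unmap takes no action.
 */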
static struct sg_table *ion_drm_heap_map_dma(struct ion_heap *heap,
                                                  struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

static void ion_drm_heap_unmap_dma(struct ion_heap *heap,
                                        struct ion_buffer *buffer)
{
}

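/*
 * CPU access to secure buffers is disallowed: both userspace mmap and
 * kernel mappings fail unconditionally.
 */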
static int ion_drm_heap_mmap(struct ion_heap *mapper,
                        struct ion_buffer *buffer,
                        struct vm_area_struct *vma)
{
        pr_info("%s: mmapping from secure heap %s disallowed\n",
                __func__, mapper->name);
        return -EINVAL;
}

static void *ion_drm_heap_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        pr_info("%s: kernel mapping from secure heap %s disallowed\n",
                __func__, heap->name);
        return NULL;
}

static void ion_drm_heap_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
}

#ifdef CONFIG_ROCKCHIP_IOMMU
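/*
 * Map the buffer through the Rockchip IOVMM so devices behind the IOMMU
 * can reach the carveout. On success the IOVA and mapped length are
 * recorded in @data for the matching unmap.
 */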
static int ion_drm_heap_map_iommu(struct ion_buffer *buffer,
                                struct device *iommu_dev,
                                struct ion_iommu_map *data,
                                unsigned long iova_length,
                                unsigned long flags)
{
        int ret = 0;
        struct sg_table *table = buffer->priv_virt;

        data->iova_addr = rockchip_iovmm_map(iommu_dev, table->sgl, 0, iova_length);
        pr_debug("%s: map %lx -> %lx\n", __func__,
                (unsigned long)table->sgl->dma_address, data->iova_addr);
        if (IS_ERR_VALUE(data->iova_addr)) {
                pr_err("%s: rockchip_iovmm_map() failed: %lx\n", __func__,
                        data->iova_addr);
                ret = data->iova_addr;
                goto out;
        }

        data->mapped_size = iova_length;

out:
        return ret;
}

static void ion_drm_heap_unmap_iommu(struct device *iommu_dev,
                        struct ion_iommu_map *data)
{
        pr_debug("%s: unmap %lx@%lx\n", __func__, data->mapped_size,
                data->iova_addr);
        rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
}
#endif

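/*
 * Heap operations: allocation and DMA description are supported, CPU
 * mappings are rejected, and IOMMU mapping is available when the
 * Rockchip IOMMU is configured.
 */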
static struct ion_heap_ops drm_heap_ops = {
        .allocate = ion_drm_heap_allocate,
        .free = ion_drm_heap_free,
        .phys = ion_drm_heap_phys,
        .map_dma = ion_drm_heap_map_dma,
        .unmap_dma = ion_drm_heap_unmap_dma,
        .map_user = ion_drm_heap_mmap,
        .map_kernel = ion_drm_heap_map_kernel,
        .unmap_kernel = ion_drm_heap_unmap_kernel,
#ifdef CONFIG_ROCKCHIP_IOMMU
        .map_iommu = ion_drm_heap_map_iommu,
        .unmap_iommu = ion_drm_heap_unmap_iommu,
#endif
};

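/*
 * Create a DRM heap over the reserved region described by @heap_data.
 * The region is flushed and zeroed once up front, then managed with a
 * gen_pool whose minimum allocation granularity is 256 bytes (order 8).
 */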
struct ion_heap *ion_drm_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_drm_heap *drm_heap;
        struct page *page;
        size_t size;
        int ret;

        page = pfn_to_page(PFN_DOWN(heap_data->base));
        size = heap_data->size;

        pr_info("%s: %zx@%lx\n", __func__, size, heap_data->base);

        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

        ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
        if (ret)
                return ERR_PTR(ret);

        drm_heap = kzalloc(sizeof(*drm_heap), GFP_KERNEL);
        if (!drm_heap)
                return ERR_PTR(-ENOMEM);

        /* Minimum allocation order 8, i.e. 256-byte granularity. */
        drm_heap->pool = gen_pool_create(8, -1);
        if (!drm_heap->pool) {
                kfree(drm_heap);
                return ERR_PTR(-ENOMEM);
        }
        drm_heap->base = heap_data->base;
        gen_pool_add(drm_heap->pool, drm_heap->base, heap_data->size, -1);
        drm_heap->heap.ops = &drm_heap_ops;
        drm_heap->heap.type = ION_HEAP_TYPE_DRM;
        /* drm_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; */

        return &drm_heap->heap;
}

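/*
 * Tear down a heap created by ion_drm_heap_create(). All buffers must
 * already have been freed back to the pool.
 */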
void ion_drm_heap_destroy(struct ion_heap *heap)
{
        struct ion_drm_heap *drm_heap =
                container_of(heap, struct ion_drm_heap, heap);

        gen_pool_destroy(drm_heap->pool);
        kfree(drm_heap);
}